Merge branch 'msm-next' of git://people.freedesktop.org/~robclark/linux into drm-next
Pretty small pull this time around for msm. Adds some useful debugfs
I'd been carrying around on a branch for a while, plus a few fixes. And
a Kconfig update for the great ARCH_MSM -> ARCH_QCOM split.
* 'msm-next' of git://people.freedesktop.org/~robclark/linux:
drm/msm: use correct gfp flag for vram allocation
drm/msm/mdp5: fix error return value
drm/msm: remove redundant private plane cleanup
drm/msm: add perf logging debugfs
drm/msm: add rd logging debugfs
drm/msm: update for ARCH_MSM -> ARCH_QCOM
drm/msm/hdmi: use gpio and HPD polling
drm/msm/mdp5: fix crash in error/unload paths
diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl
index c1c9cf2..00f1c25 100644
--- a/Documentation/DocBook/drm.tmpl
+++ b/Documentation/DocBook/drm.tmpl
@@ -3368,6 +3368,10 @@
with a call to <function>drm_vblank_cleanup</function> in the driver
<methodname>unload</methodname> operation handler.
</para>
+ <sect2>
+ <title>Vertical Blanking and Interrupt Handling Functions Reference</title>
+!Edrivers/gpu/drm/drm_irq.c
+ </sect2>
</sect1>
<!-- Internals: open/close, file operations and ioctls -->
@@ -3710,17 +3714,16 @@
<term>DRM_IOCTL_MODESET_CTL</term>
<listitem>
<para>
- This should be called by application level drivers before and
- after mode setting, since on many devices the vertical blank
- counter is reset at that time. Internally, the DRM snapshots
- the last vblank count when the ioctl is called with the
- _DRM_PRE_MODESET command, so that the counter won't go backwards
- (which is dealt with when _DRM_POST_MODESET is used).
+ This was only used for user-mode-setting drivers around
+ modesetting changes to allow the kernel to update the vblank
+ interrupt after mode setting, since on many devices the vertical
+ blank counter is reset to 0 at some point during modeset. Modern
+ drivers should not call this any more since with kernel mode
+ setting it is a no-op.
</para>
</listitem>
</varlistentry>
</variablelist>
-<!--!Edrivers/char/drm/drm_irq.c-->
</para>
</sect1>
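
For context, a minimal sketch of how a legacy user-mode-setting client
would have bracketed a modeset with this ioctl; the legacy_modeset()
wrapper and the fd handling are illustrative only, while the ioctl and
struct drm_modeset_ctl come from the uapi headers:

	#include <sys/ioctl.h>
	#include <drm/drm.h>

	static void legacy_modeset(int drm_fd, unsigned int crtc)
	{
		struct drm_modeset_ctl ctl = { .crtc = crtc };

		/* snapshot the vblank counter before it gets reset */
		ctl.cmd = _DRM_PRE_MODESET;
		ioctl(drm_fd, DRM_IOCTL_MODESET_CTL, &ctl);

		/* ... program the new mode from userspace ... */

		/* let the core account for the counter jump */
		ctl.cmd = _DRM_POST_MODESET;
		ioctl(drm_fd, DRM_IOCTL_MODESET_CTL, &ctl);
	}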
@@ -3783,6 +3786,96 @@
probing, so those sections fully apply.
</para>
</sect2>
+ <sect2>
+ <title>DPIO</title>
+!Pdrivers/gpu/drm/i915/i915_reg.h DPIO
+ <table id="dpiox2">
+ <title>Dual channel PHY (VLV/CHV)</title>
+ <tgroup cols="8">
+ <colspec colname="c0" />
+ <colspec colname="c1" />
+ <colspec colname="c2" />
+ <colspec colname="c3" />
+ <colspec colname="c4" />
+ <colspec colname="c5" />
+ <colspec colname="c6" />
+ <colspec colname="c7" />
+ <spanspec spanname="ch0" namest="c0" nameend="c3" />
+ <spanspec spanname="ch1" namest="c4" nameend="c7" />
+ <spanspec spanname="ch0pcs01" namest="c0" nameend="c1" />
+ <spanspec spanname="ch0pcs23" namest="c2" nameend="c3" />
+ <spanspec spanname="ch1pcs01" namest="c4" nameend="c5" />
+ <spanspec spanname="ch1pcs23" namest="c6" nameend="c7" />
+ <thead>
+ <row>
+ <entry spanname="ch0">CH0</entry>
+ <entry spanname="ch1">CH1</entry>
+ </row>
+ </thead>
+ <tbody valign="top" align="center">
+ <row>
+ <entry spanname="ch0">CMN/PLL/REF</entry>
+ <entry spanname="ch1">CMN/PLL/REF</entry>
+ </row>
+ <row>
+ <entry spanname="ch0pcs01">PCS01</entry>
+ <entry spanname="ch0pcs23">PCS23</entry>
+ <entry spanname="ch1pcs01">PCS01</entry>
+ <entry spanname="ch1pcs23">PCS23</entry>
+ </row>
+ <row>
+ <entry>TX0</entry>
+ <entry>TX1</entry>
+ <entry>TX2</entry>
+ <entry>TX3</entry>
+ <entry>TX0</entry>
+ <entry>TX1</entry>
+ <entry>TX2</entry>
+ <entry>TX3</entry>
+ </row>
+ <row>
+ <entry spanname="ch0">DDI0</entry>
+ <entry spanname="ch1">DDI1</entry>
+ </row>
+ </tbody>
+ </tgroup>
+ </table>
+ <table id="dpiox1">
+ <title>Single channel PHY (CHV)</title>
+ <tgroup cols="4">
+ <colspec colname="c0" />
+ <colspec colname="c1" />
+ <colspec colname="c2" />
+ <colspec colname="c3" />
+ <spanspec spanname="ch0" namest="c0" nameend="c3" />
+ <spanspec spanname="ch0pcs01" namest="c0" nameend="c1" />
+ <spanspec spanname="ch0pcs23" namest="c2" nameend="c3" />
+ <thead>
+ <row>
+ <entry spanname="ch0">CH0</entry>
+ </row>
+ </thead>
+ <tbody valign="top" align="center">
+ <row>
+ <entry spanname="ch0">CMN/PLL/REF</entry>
+ </row>
+ <row>
+ <entry spanname="ch0pcs01">PCS01</entry>
+ <entry spanname="ch0pcs23">PCS23</entry>
+ </row>
+ <row>
+ <entry>TX0</entry>
+ <entry>TX1</entry>
+ <entry>TX2</entry>
+ <entry>TX3</entry>
+ </row>
+ <row>
+ <entry spanname="ch0">DDI2</entry>
+ </row>
+ </tbody>
+ </tgroup>
+ </table>
+ </sect2>
</sect1>
<sect1>
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index 6e2537c..f96098f 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -418,7 +418,7 @@
return gmch_ctrl << 25; /* 32 MB units */
}
-static size_t gen8_stolen_size(int num, int slot, int func)
+static size_t __init gen8_stolen_size(int num, int slot, int func)
{
u16 gmch_ctrl;
@@ -428,48 +428,73 @@
return gmch_ctrl << 25; /* 32 MB units */
}
+static size_t __init chv_stolen_size(int num, int slot, int func)
+{
+ u16 gmch_ctrl;
+
+ gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL);
+ gmch_ctrl >>= SNB_GMCH_GMS_SHIFT;
+ gmch_ctrl &= SNB_GMCH_GMS_MASK;
+
+ /*
+ * 0x0 to 0x10: 32MB increments starting at 0MB
+ * 0x11 to 0x16: 4MB increments starting at 8MB
+ * 0x17 to 0x1d: 4MB increments starting at 36MB
+ */
+ if (gmch_ctrl < 0x11)
+ return gmch_ctrl << 25;
+ else if (gmch_ctrl < 0x17)
+ return (gmch_ctrl - 0x11 + 2) << 22;
+ else
+ return (gmch_ctrl - 0x17 + 9) << 22;
+}
struct intel_stolen_funcs {
size_t (*size)(int num, int slot, int func);
u32 (*base)(int num, int slot, int func, size_t size);
};
-static const struct intel_stolen_funcs i830_stolen_funcs = {
+static const struct intel_stolen_funcs i830_stolen_funcs __initconst = {
.base = i830_stolen_base,
.size = i830_stolen_size,
};
-static const struct intel_stolen_funcs i845_stolen_funcs = {
+static const struct intel_stolen_funcs i845_stolen_funcs __initconst = {
.base = i845_stolen_base,
.size = i830_stolen_size,
};
-static const struct intel_stolen_funcs i85x_stolen_funcs = {
+static const struct intel_stolen_funcs i85x_stolen_funcs __initconst = {
.base = i85x_stolen_base,
.size = gen3_stolen_size,
};
-static const struct intel_stolen_funcs i865_stolen_funcs = {
+static const struct intel_stolen_funcs i865_stolen_funcs __initconst = {
.base = i865_stolen_base,
.size = gen3_stolen_size,
};
-static const struct intel_stolen_funcs gen3_stolen_funcs = {
+static const struct intel_stolen_funcs gen3_stolen_funcs __initconst = {
.base = intel_stolen_base,
.size = gen3_stolen_size,
};
-static const struct intel_stolen_funcs gen6_stolen_funcs = {
+static const struct intel_stolen_funcs gen6_stolen_funcs __initconst = {
.base = intel_stolen_base,
.size = gen6_stolen_size,
};
-static const struct intel_stolen_funcs gen8_stolen_funcs = {
+static const struct intel_stolen_funcs gen8_stolen_funcs __initconst = {
.base = intel_stolen_base,
.size = gen8_stolen_size,
};
-static struct pci_device_id intel_stolen_ids[] __initdata = {
+static const struct intel_stolen_funcs chv_stolen_funcs __initconst = {
+ .base = intel_stolen_base,
+ .size = chv_stolen_size,
+};
+
+static const struct pci_device_id intel_stolen_ids[] __initconst = {
INTEL_I830_IDS(&i830_stolen_funcs),
INTEL_I845G_IDS(&i845_stolen_funcs),
INTEL_I85X_IDS(&i85x_stolen_funcs),
@@ -495,7 +520,8 @@
INTEL_HSW_D_IDS(&gen6_stolen_funcs),
INTEL_HSW_M_IDS(&gen6_stolen_funcs),
INTEL_BDW_M_IDS(&gen8_stolen_funcs),
- INTEL_BDW_D_IDS(&gen8_stolen_funcs)
+ INTEL_BDW_D_IDS(&gen8_stolen_funcs),
+ INTEL_CHV_IDS(&chv_stolen_funcs),
};
static void __init intel_graphics_stolen(int num, int slot, int func)
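
As a sanity check on the chv_stolen_size() decode above, here is a
standalone restatement with a few worked values; the chv_decode() test
harness is hypothetical and not part of the patch:

	#include <assert.h>
	#include <stddef.h>

	static size_t chv_decode(unsigned int gms)
	{
		if (gms < 0x11)
			return (size_t)gms << 25;		/* 32MB units */
		else if (gms < 0x17)
			return (size_t)(gms - 0x11 + 2) << 22;	/* 8MB + 4MB steps */
		else
			return (size_t)(gms - 0x17 + 9) << 22;	/* 36MB + 4MB steps */
	}

	int main(void)
	{
		assert(chv_decode(0x01) == (size_t)32 << 20);	/* 32MB */
		assert(chv_decode(0x11) == (size_t)8 << 20);	/* 8MB */
		assert(chv_decode(0x16) == (size_t)28 << 20);	/* 28MB */
		assert(chv_decode(0x17) == (size_t)36 << 20);	/* 36MB */
		return 0;
	}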
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 48e38ba..863db84 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -13,8 +13,7 @@
drm_crtc.o drm_modes.o drm_edid.o \
drm_info.o drm_debugfs.o drm_encoder_slave.o \
drm_trace_points.o drm_global.o drm_prime.o \
- drm_rect.o drm_vma_manager.o drm_flip_work.o \
- drm_plane_helper.o
+ drm_rect.o drm_vma_manager.o drm_flip_work.o
drm-$(CONFIG_COMPAT) += drm_ioc32.o
drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
@@ -23,7 +22,8 @@
drm-usb-y := drm_usb.o
-drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o
+drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \
+ drm_plane_helper.o
drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
drm_kms_helper-$(CONFIG_DRM_KMS_FB_HELPER) += drm_fb_helper.o
drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index edee61b..f3b98d4 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -54,6 +54,8 @@
mutex_lock(&dev->mode_config.mutex);
+ mutex_lock(&dev->mode_config.connection_mutex);
+
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
mutex_lock_nest_lock(&crtc->mutex, &dev->mode_config.mutex);
}
@@ -72,6 +74,8 @@
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
mutex_unlock(&crtc->mutex);
+ mutex_unlock(&dev->mode_config.connection_mutex);
+
mutex_unlock(&dev->mode_config.mutex);
}
EXPORT_SYMBOL(drm_modeset_unlock_all);
@@ -93,6 +97,7 @@
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
WARN_ON(!mutex_is_locked(&crtc->mutex));
+ WARN_ON(!mutex_is_locked(&dev->mode_config.connection_mutex));
WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
}
EXPORT_SYMBOL(drm_warn_on_modeset_not_all_locked);
@@ -257,26 +262,6 @@
}
/**
- * drm_get_encoder_name - return a string for encoder
- * @encoder: the encoder to get name for
- */
-const char *drm_get_encoder_name(const struct drm_encoder *encoder)
-{
- return encoder->name;
-}
-EXPORT_SYMBOL(drm_get_encoder_name);
-
-/**
- * drm_get_connector_name - return a string for connector
- * @connector: the connector to get name for
- */
-const char *drm_get_connector_name(const struct drm_connector *connector)
-{
- return connector->name;
-}
-EXPORT_SYMBOL(drm_get_connector_name);
-
-/**
* drm_get_connector_status_name - return a string for connector status
* @status: connector status to compute name of
*
@@ -390,6 +375,21 @@
mutex_unlock(&dev->mode_config.idr_mutex);
}
+static struct drm_mode_object *_object_find(struct drm_device *dev,
+ uint32_t id, uint32_t type)
+{
+ struct drm_mode_object *obj = NULL;
+
+ mutex_lock(&dev->mode_config.idr_mutex);
+ obj = idr_find(&dev->mode_config.crtc_idr, id);
+ if (!obj || (type != DRM_MODE_OBJECT_ANY && obj->type != type) ||
+ (obj->id != id))
+ obj = NULL;
+ mutex_unlock(&dev->mode_config.idr_mutex);
+
+ return obj;
+}
+
/**
* drm_mode_object_find - look up a drm object with static lifetime
* @dev: drm device
@@ -397,7 +397,9 @@
* @type: type of the mode object
*
* Note that framebuffers cannot be looked up with this function - since those
- * are reference counted, they need special treatment.
+ * are reference counted, they need special treatment, even with
+ * DRM_MODE_OBJECT_ANY (an fb lookup will then simply return NULL
+ * rather than WARN_ON()).
*/
struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
uint32_t id, uint32_t type)
@@ -407,13 +409,10 @@
/* Framebuffers are reference counted and need their own lookup
* function.*/
WARN_ON(type == DRM_MODE_OBJECT_FB);
-
- mutex_lock(&dev->mode_config.idr_mutex);
- obj = idr_find(&dev->mode_config.crtc_idr, id);
- if (!obj || (obj->type != type) || (obj->id != id))
+ obj = _object_find(dev, id, type);
+ /* don't leak out unref'd fb's */
+ if (obj && (obj->type == DRM_MODE_OBJECT_FB))
obj = NULL;
- mutex_unlock(&dev->mode_config.idr_mutex);
-
return obj;
}
EXPORT_SYMBOL(drm_mode_object_find);
@@ -519,7 +518,7 @@
*/
void drm_framebuffer_unreference(struct drm_framebuffer *fb)
{
- DRM_DEBUG("FB ID: %d\n", fb->base.id);
+ DRM_DEBUG("%p: FB ID: %d (%d)\n", fb, fb->base.id, atomic_read(&fb->refcount.refcount));
kref_put(&fb->refcount, drm_framebuffer_free);
}
EXPORT_SYMBOL(drm_framebuffer_unreference);
@@ -532,7 +531,7 @@
*/
void drm_framebuffer_reference(struct drm_framebuffer *fb)
{
- DRM_DEBUG("FB ID: %d\n", fb->base.id);
+ DRM_DEBUG("%p: FB ID: %d (%d)\n", fb, fb->base.id, atomic_read(&fb->refcount.refcount));
kref_get(&fb->refcount);
}
EXPORT_SYMBOL(drm_framebuffer_reference);
@@ -544,7 +543,7 @@
static void __drm_framebuffer_unreference(struct drm_framebuffer *fb)
{
- DRM_DEBUG("FB ID: %d\n", fb->base.id);
+ DRM_DEBUG("%p: FB ID: %d (%d)\n", fb, fb->base.id, atomic_read(&fb->refcount.refcount));
kref_put(&fb->refcount, drm_framebuffer_free_bug);
}
@@ -1631,7 +1630,7 @@
&dev->mode_config.encoder_list,
head) {
DRM_DEBUG_KMS("[ENCODER:%d:%s]\n", encoder->base.id,
- drm_get_encoder_name(encoder));
+ encoder->name);
if (put_user(encoder->base.id, encoder_id +
copied)) {
ret = -EFAULT;
@@ -1663,7 +1662,7 @@
head) {
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id,
- drm_get_connector_name(connector));
+ connector->name);
if (put_user(connector->base.id,
connector_id + copied)) {
ret = -EFAULT;
@@ -1712,7 +1711,6 @@
{
struct drm_mode_crtc *crtc_resp = data;
struct drm_crtc *crtc;
- struct drm_mode_object *obj;
int ret = 0;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
@@ -1720,13 +1718,11 @@
drm_modeset_lock_all(dev);
- obj = drm_mode_object_find(dev, crtc_resp->crtc_id,
- DRM_MODE_OBJECT_CRTC);
- if (!obj) {
+ crtc = drm_crtc_find(dev, crtc_resp->crtc_id);
+ if (!crtc) {
ret = -ENOENT;
goto out;
}
- crtc = obj_to_crtc(obj);
crtc_resp->x = crtc->x;
crtc_resp->y = crtc->y;
@@ -1780,7 +1776,6 @@
struct drm_file *file_priv)
{
struct drm_mode_get_connector *out_resp = data;
- struct drm_mode_object *obj;
struct drm_connector *connector;
struct drm_display_mode *mode;
int mode_count = 0;
@@ -1803,14 +1798,13 @@
DRM_DEBUG_KMS("[CONNECTOR:%d:?]\n", out_resp->connector_id);
mutex_lock(&dev->mode_config.mutex);
+ mutex_lock(&dev->mode_config.connection_mutex);
- obj = drm_mode_object_find(dev, out_resp->connector_id,
- DRM_MODE_OBJECT_CONNECTOR);
- if (!obj) {
+ connector = drm_connector_find(dev, out_resp->connector_id);
+ if (!connector) {
ret = -ENOENT;
goto out;
}
- connector = obj_to_connector(obj);
props_count = connector->properties.count;
@@ -1903,6 +1897,7 @@
out_resp->count_encoders = encoders_count;
out:
+ mutex_unlock(&dev->mode_config.connection_mutex);
mutex_unlock(&dev->mode_config.mutex);
return ret;
@@ -1925,7 +1920,6 @@
struct drm_file *file_priv)
{
struct drm_mode_get_encoder *enc_resp = data;
- struct drm_mode_object *obj;
struct drm_encoder *encoder;
int ret = 0;
@@ -1933,13 +1927,11 @@
return -EINVAL;
drm_modeset_lock_all(dev);
- obj = drm_mode_object_find(dev, enc_resp->encoder_id,
- DRM_MODE_OBJECT_ENCODER);
- if (!obj) {
+ encoder = drm_encoder_find(dev, enc_resp->encoder_id);
+ if (!encoder) {
ret = -ENOENT;
goto out;
}
- encoder = obj_to_encoder(obj);
if (encoder->crtc)
enc_resp->crtc_id = encoder->crtc->base.id;
@@ -2037,7 +2029,6 @@
struct drm_file *file_priv)
{
struct drm_mode_get_plane *plane_resp = data;
- struct drm_mode_object *obj;
struct drm_plane *plane;
uint32_t __user *format_ptr;
int ret = 0;
@@ -2046,13 +2037,11 @@
return -EINVAL;
drm_modeset_lock_all(dev);
- obj = drm_mode_object_find(dev, plane_resp->plane_id,
- DRM_MODE_OBJECT_PLANE);
- if (!obj) {
+ plane = drm_plane_find(dev, plane_resp->plane_id);
+ if (!plane) {
ret = -ENOENT;
goto out;
}
- plane = obj_to_plane(obj);
if (plane->crtc)
plane_resp->crtc_id = plane->crtc->base.id;
@@ -2105,7 +2094,6 @@
struct drm_file *file_priv)
{
struct drm_mode_set_plane *plane_req = data;
- struct drm_mode_object *obj;
struct drm_plane *plane;
struct drm_crtc *crtc;
struct drm_framebuffer *fb = NULL, *old_fb = NULL;
@@ -2120,14 +2108,12 @@
* First, find the plane, crtc, and fb objects. If not available,
* we don't bother to call the driver.
*/
- obj = drm_mode_object_find(dev, plane_req->plane_id,
- DRM_MODE_OBJECT_PLANE);
- if (!obj) {
+ plane = drm_plane_find(dev, plane_req->plane_id);
+ if (!plane) {
DRM_DEBUG_KMS("Unknown plane ID %d\n",
plane_req->plane_id);
return -ENOENT;
}
- plane = obj_to_plane(obj);
/* No fb means shut it down */
if (!plane_req->fb_id) {
@@ -2144,15 +2130,13 @@
goto out;
}
- obj = drm_mode_object_find(dev, plane_req->crtc_id,
- DRM_MODE_OBJECT_CRTC);
- if (!obj) {
+ crtc = drm_crtc_find(dev, plane_req->crtc_id);
+ if (!crtc) {
DRM_DEBUG_KMS("Unknown crtc ID %d\n",
plane_req->crtc_id);
ret = -ENOENT;
goto out;
}
- crtc = obj_to_crtc(obj);
fb = drm_framebuffer_lookup(dev, plane_req->fb_id);
if (!fb) {
@@ -2339,7 +2323,6 @@
{
struct drm_mode_config *config = &dev->mode_config;
struct drm_mode_crtc *crtc_req = data;
- struct drm_mode_object *obj;
struct drm_crtc *crtc;
struct drm_connector **connector_set = NULL, *connector;
struct drm_framebuffer *fb = NULL;
@@ -2357,14 +2340,12 @@
return -ERANGE;
drm_modeset_lock_all(dev);
- obj = drm_mode_object_find(dev, crtc_req->crtc_id,
- DRM_MODE_OBJECT_CRTC);
- if (!obj) {
+ crtc = drm_crtc_find(dev, crtc_req->crtc_id);
+ if (!crtc) {
DRM_DEBUG_KMS("Unknown CRTC ID %d\n", crtc_req->crtc_id);
ret = -ENOENT;
goto out;
}
- crtc = obj_to_crtc(obj);
DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
if (crtc_req->mode_valid) {
@@ -2447,18 +2428,16 @@
goto out;
}
- obj = drm_mode_object_find(dev, out_id,
- DRM_MODE_OBJECT_CONNECTOR);
- if (!obj) {
+ connector = drm_connector_find(dev, out_id);
+ if (!connector) {
DRM_DEBUG_KMS("Connector id %d unknown\n",
out_id);
ret = -ENOENT;
goto out;
}
- connector = obj_to_connector(obj);
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id,
- drm_get_connector_name(connector));
+ connector->name);
connector_set[i] = connector;
}
@@ -2487,7 +2466,6 @@
struct drm_mode_cursor2 *req,
struct drm_file *file_priv)
{
- struct drm_mode_object *obj;
struct drm_crtc *crtc;
int ret = 0;
@@ -2497,12 +2475,11 @@
if (!req->flags || (~DRM_MODE_CURSOR_FLAGS & req->flags))
return -EINVAL;
- obj = drm_mode_object_find(dev, req->crtc_id, DRM_MODE_OBJECT_CRTC);
- if (!obj) {
+ crtc = drm_crtc_find(dev, req->crtc_id);
+ if (!crtc) {
DRM_DEBUG_KMS("Unknown CRTC ID %d\n", req->crtc_id);
return -ENOENT;
}
- crtc = obj_to_crtc(obj);
mutex_lock(&crtc->mutex);
if (req->flags & DRM_MODE_CURSOR_BO) {
@@ -3118,6 +3095,8 @@
if (!property)
return NULL;
+ property->dev = dev;
+
if (num_values) {
property->values = kzalloc(sizeof(uint64_t)*num_values, GFP_KERNEL);
if (!property->values)
@@ -3138,6 +3117,9 @@
}
list_add_tail(&property->head, &dev->mode_config.property_list);
+
+ WARN_ON(!drm_property_type_valid(property));
+
return property;
fail:
kfree(property->values);
@@ -3238,6 +3220,22 @@
}
EXPORT_SYMBOL(drm_property_create_bitmask);
+static struct drm_property *property_create_range(struct drm_device *dev,
+ int flags, const char *name,
+ uint64_t min, uint64_t max)
+{
+ struct drm_property *property;
+
+ property = drm_property_create(dev, flags, name, 2);
+ if (!property)
+ return NULL;
+
+ property->values[0] = min;
+ property->values[1] = max;
+
+ return property;
+}
+
/**
* drm_property_create_range - create a new ranged property type
* @dev: drm device
@@ -3260,20 +3258,36 @@
const char *name,
uint64_t min, uint64_t max)
{
+ return property_create_range(dev, DRM_MODE_PROP_RANGE | flags,
+ name, min, max);
+}
+EXPORT_SYMBOL(drm_property_create_range);
+
+struct drm_property *drm_property_create_signed_range(struct drm_device *dev,
+ int flags, const char *name,
+ int64_t min, int64_t max)
+{
+ return property_create_range(dev, DRM_MODE_PROP_SIGNED_RANGE | flags,
+ name, I642U64(min), I642U64(max));
+}
+EXPORT_SYMBOL(drm_property_create_signed_range);
+
+struct drm_property *drm_property_create_object(struct drm_device *dev,
+ int flags, const char *name, uint32_t type)
+{
struct drm_property *property;
- flags |= DRM_MODE_PROP_RANGE;
+ flags |= DRM_MODE_PROP_OBJECT;
- property = drm_property_create(dev, flags, name, 2);
+ property = drm_property_create(dev, flags, name, 1);
if (!property)
return NULL;
- property->values[0] = min;
- property->values[1] = max;
+ property->values[0] = type;
return property;
}
-EXPORT_SYMBOL(drm_property_create_range);
+EXPORT_SYMBOL(drm_property_create_object);
/**
* drm_property_add_enum - add a possible value to an enumeration property
@@ -3295,14 +3309,16 @@
{
struct drm_property_enum *prop_enum;
- if (!(property->flags & (DRM_MODE_PROP_ENUM | DRM_MODE_PROP_BITMASK)))
+ if (!(drm_property_type_is(property, DRM_MODE_PROP_ENUM) ||
+ drm_property_type_is(property, DRM_MODE_PROP_BITMASK)))
return -EINVAL;
/*
* Bitmask enum properties have the additional constraint of values
* from 0 to 63
*/
- if ((property->flags & DRM_MODE_PROP_BITMASK) && (value > 63))
+ if (drm_property_type_is(property, DRM_MODE_PROP_BITMASK) &&
+ (value > 63))
return -EINVAL;
if (!list_empty(&property->enum_blob_list)) {
@@ -3459,7 +3475,6 @@
int drm_mode_getproperty_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
- struct drm_mode_object *obj;
struct drm_mode_get_property *out_resp = data;
struct drm_property *property;
int enum_count = 0;
@@ -3478,17 +3493,17 @@
return -EINVAL;
drm_modeset_lock_all(dev);
- obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY);
- if (!obj) {
+ property = drm_property_find(dev, out_resp->prop_id);
+ if (!property) {
ret = -ENOENT;
goto done;
}
- property = obj_to_property(obj);
- if (property->flags & (DRM_MODE_PROP_ENUM | DRM_MODE_PROP_BITMASK)) {
+ if (drm_property_type_is(property, DRM_MODE_PROP_ENUM) ||
+ drm_property_type_is(property, DRM_MODE_PROP_BITMASK)) {
list_for_each_entry(prop_enum, &property->enum_blob_list, head)
enum_count++;
- } else if (property->flags & DRM_MODE_PROP_BLOB) {
+ } else if (drm_property_type_is(property, DRM_MODE_PROP_BLOB)) {
list_for_each_entry(prop_blob, &property->enum_blob_list, head)
blob_count++;
}
@@ -3510,7 +3525,8 @@
}
out_resp->count_values = value_count;
- if (property->flags & (DRM_MODE_PROP_ENUM | DRM_MODE_PROP_BITMASK)) {
+ if (drm_property_type_is(property, DRM_MODE_PROP_ENUM) ||
+ drm_property_type_is(property, DRM_MODE_PROP_BITMASK)) {
if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
copied = 0;
enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
@@ -3532,7 +3548,7 @@
out_resp->count_enum_blobs = enum_count;
}
- if (property->flags & DRM_MODE_PROP_BLOB) {
+ if (drm_property_type_is(property, DRM_MODE_PROP_BLOB)) {
if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
copied = 0;
blob_id_ptr = (uint32_t __user *)(unsigned long)out_resp->enum_blob_ptr;
@@ -3611,7 +3627,6 @@
int drm_mode_getblob_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
- struct drm_mode_object *obj;
struct drm_mode_get_blob *out_resp = data;
struct drm_property_blob *blob;
int ret = 0;
@@ -3621,12 +3636,11 @@
return -EINVAL;
drm_modeset_lock_all(dev);
- obj = drm_mode_object_find(dev, out_resp->blob_id, DRM_MODE_OBJECT_BLOB);
- if (!obj) {
+ blob = drm_property_blob_find(dev, out_resp->blob_id);
+ if (!blob) {
ret = -ENOENT;
goto done;
}
- blob = obj_to_blob(obj);
if (out_resp->length == blob->length) {
blob_ptr = (void __user *)(unsigned long)out_resp->data;
@@ -3688,19 +3702,40 @@
{
if (property->flags & DRM_MODE_PROP_IMMUTABLE)
return false;
- if (property->flags & DRM_MODE_PROP_RANGE) {
+
+ if (drm_property_type_is(property, DRM_MODE_PROP_RANGE)) {
if (value < property->values[0] || value > property->values[1])
return false;
return true;
- } else if (property->flags & DRM_MODE_PROP_BITMASK) {
+ } else if (drm_property_type_is(property, DRM_MODE_PROP_SIGNED_RANGE)) {
+ int64_t svalue = U642I64(value);
+ if (svalue < U642I64(property->values[0]) ||
+ svalue > U642I64(property->values[1]))
+ return false;
+ return true;
+ } else if (drm_property_type_is(property, DRM_MODE_PROP_BITMASK)) {
int i;
uint64_t valid_mask = 0;
for (i = 0; i < property->num_values; i++)
valid_mask |= (1ULL << property->values[i]);
return !(value & ~valid_mask);
- } else if (property->flags & DRM_MODE_PROP_BLOB) {
+ } else if (drm_property_type_is(property, DRM_MODE_PROP_BLOB)) {
/* Only the driver knows */
return true;
+ } else if (drm_property_type_is(property, DRM_MODE_PROP_OBJECT)) {
+ struct drm_mode_object *obj;
+ /* a zero value for an object property translates to null: */
+ if (value == 0)
+ return true;
+ /*
+ * NOTE: use _object_find() directly to bypass restriction on
+ * looking up refcnt'd objects (ie. fb's). For a refcnt'd
+ * object this could race against object finalization, so it
+ * simply tells us that the object *was* valid. Which is good
+ * enough.
+ */
+ obj = _object_find(property->dev, value, property->values[0]);
+ return obj != NULL;
} else {
int i;
for (i = 0; i < property->num_values; i++)
@@ -4008,7 +4043,6 @@
void *data, struct drm_file *file_priv)
{
struct drm_mode_crtc_lut *crtc_lut = data;
- struct drm_mode_object *obj;
struct drm_crtc *crtc;
void *r_base, *g_base, *b_base;
int size;
@@ -4018,12 +4052,11 @@
return -EINVAL;
drm_modeset_lock_all(dev);
- obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC);
- if (!obj) {
+ crtc = drm_crtc_find(dev, crtc_lut->crtc_id);
+ if (!crtc) {
ret = -ENOENT;
goto out;
}
- crtc = obj_to_crtc(obj);
if (crtc->funcs->gamma_set == NULL) {
ret = -ENOSYS;
@@ -4082,7 +4115,6 @@
void *data, struct drm_file *file_priv)
{
struct drm_mode_crtc_lut *crtc_lut = data;
- struct drm_mode_object *obj;
struct drm_crtc *crtc;
void *r_base, *g_base, *b_base;
int size;
@@ -4092,12 +4124,11 @@
return -EINVAL;
drm_modeset_lock_all(dev);
- obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC);
- if (!obj) {
+ crtc = drm_crtc_find(dev, crtc_lut->crtc_id);
+ if (!crtc) {
ret = -ENOENT;
goto out;
}
- crtc = obj_to_crtc(obj);
/* memcpy into gamma store */
if (crtc_lut->gamma_size != crtc->gamma_size) {
@@ -4150,7 +4181,6 @@
void *data, struct drm_file *file_priv)
{
struct drm_mode_crtc_page_flip *page_flip = data;
- struct drm_mode_object *obj;
struct drm_crtc *crtc;
struct drm_framebuffer *fb = NULL, *old_fb = NULL;
struct drm_pending_vblank_event *e = NULL;
@@ -4164,10 +4194,9 @@
if ((page_flip->flags & DRM_MODE_PAGE_FLIP_ASYNC) && !dev->mode_config.async_page_flip)
return -EINVAL;
- obj = drm_mode_object_find(dev, page_flip->crtc_id, DRM_MODE_OBJECT_CRTC);
- if (!obj)
+ crtc = drm_crtc_find(dev, page_flip->crtc_id);
+ if (!crtc)
return -ENOENT;
- crtc = obj_to_crtc(obj);
mutex_lock(&crtc->mutex);
if (crtc->primary->fb == NULL) {
@@ -4618,6 +4647,7 @@
void drm_mode_config_init(struct drm_device *dev)
{
mutex_init(&dev->mode_config.mutex);
+ mutex_init(&dev->mode_config.connection_mutex);
mutex_init(&dev->mode_config.idr_mutex);
mutex_init(&dev->mode_config.fb_lock);
INIT_LIST_HEAD(&dev->mode_config.fb_list);
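
For illustration, a sketch of a driver exercising the two property
kinds introduced in this file; the foo_ names, the property strings and
the attach points are all hypothetical:

	static void foo_create_props(struct drm_device *dev,
				     struct drm_crtc *crtc)
	{
		struct drm_property *prop;

		/* signed range: min/max go through I642U64() and are
		 * checked with U642I64() in the validation hunk above */
		prop = drm_property_create_signed_range(dev, 0,
							"foo-pan-offset",
							-1024, 1024);
		if (prop)
			drm_object_attach_property(&crtc->base, prop, 0);

		/* object property: the value must be the id of an object
		 * of the given type, or 0 for "none" */
		prop = drm_property_create_object(dev, 0, "foo-paired-crtc",
						  DRM_MODE_OBJECT_CRTC);
		if (prop)
			drm_object_attach_property(&crtc->base, prop, 0);
	}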
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 54e8fdb..b55d27c 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -89,6 +89,8 @@
struct drm_device *dev = encoder->dev;
WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
+ WARN_ON(!mutex_is_locked(&dev->mode_config.connection_mutex));
+
list_for_each_entry(connector, &dev->mode_config.connector_list, head)
if (connector->encoder == encoder)
return true;
@@ -330,7 +332,7 @@
continue;
DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n",
- encoder->base.id, drm_get_encoder_name(encoder),
+ encoder->base.id, encoder->name,
mode->base.id, mode->name);
encoder_funcs = encoder->helper_private;
encoder_funcs->mode_set(encoder, mode, adjusted_mode);
@@ -600,11 +602,11 @@
}
if (new_crtc) {
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
- connector->base.id, drm_get_connector_name(connector),
+ connector->base.id, connector->name,
new_crtc->base.id);
} else {
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n",
- connector->base.id, drm_get_connector_name(connector));
+ connector->base.id, connector->name);
}
}
@@ -630,7 +632,7 @@
DRM_DEBUG_KMS("Setting connector DPMS state to on\n");
for (i = 0; i < set->num_connectors; i++) {
DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id,
- drm_get_connector_name(set->connectors[i]));
+ set->connectors[i]->name);
set->connectors[i]->funcs->dpms(set->connectors[i], DRM_MODE_DPMS_ON);
}
}
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index d74239f..7be2178 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -70,6 +70,8 @@
#define EDID_QUIRK_FORCE_REDUCED_BLANKING (1 << 7)
/* Force 8bpc */
#define EDID_QUIRK_FORCE_8BPC (1 << 8)
+/* Force 12bpc */
+#define EDID_QUIRK_FORCE_12BPC (1 << 9)
struct detailed_mode_closure {
struct drm_connector *connector;
@@ -125,6 +127,9 @@
{ "SAM", 596, EDID_QUIRK_PREFER_LARGE_60 },
{ "SAM", 638, EDID_QUIRK_PREFER_LARGE_60 },
+ /* Sony PVM-2541A does up to 12 bpc, but only reports max 8 bpc */
+ { "SNY", 0x2541, EDID_QUIRK_FORCE_12BPC },
+
/* ViewSonic VA2026w */
{ "VSC", 5020, EDID_QUIRK_FORCE_REDUCED_BLANKING },
@@ -1227,7 +1232,7 @@
if (i == 4 && print_bad_edid) {
dev_warn(connector->dev->dev,
"%s: Ignoring invalid EDID block %d.\n",
- drm_get_connector_name(connector), j);
+ connector->name, j);
connector->bad_edid_counter++;
}
@@ -1247,7 +1252,7 @@
carp:
if (print_bad_edid) {
dev_warn(connector->dev->dev, "%s: EDID block %d invalid.\n",
- drm_get_connector_name(connector), j);
+ connector->name, j);
}
connector->bad_edid_counter++;
@@ -3299,6 +3304,8 @@
struct drm_connector *connector;
struct drm_device *dev = encoder->dev;
+ WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
+
list_for_each_entry(connector, &dev->mode_config.connector_list, head)
if (connector->encoder == encoder && connector->eld[0])
return connector;
@@ -3423,16 +3430,116 @@
EXPORT_SYMBOL(drm_rgb_quant_range_selectable);
/**
+ * drm_assign_hdmi_deep_color_info - detect whether monitor supports
+ * hdmi deep color modes and update drm_display_info if so.
+ *
+ * @edid: monitor EDID information
+ * @info: Updated with maximum supported deep color bpc and color format
+ * if deep color supported.
+ * @connector: connector whose name is used for debug output
+ *
+ * Parse the CEA extension according to CEA-861-B.
+ * Return true if HDMI deep color supported, false if not or unknown.
+ */
+static bool drm_assign_hdmi_deep_color_info(struct edid *edid,
+ struct drm_display_info *info,
+ struct drm_connector *connector)
+{
+ u8 *edid_ext, *hdmi;
+ int i;
+ int start_offset, end_offset;
+ unsigned int dc_bpc = 0;
+
+ edid_ext = drm_find_cea_extension(edid);
+ if (!edid_ext)
+ return false;
+
+ if (cea_db_offsets(edid_ext, &start_offset, &end_offset))
+ return false;
+
+ /*
+ * Because HDMI identifier is in Vendor Specific Block,
+ * search it from all data blocks of CEA extension.
+ */
+ for_each_cea_db(edid_ext, i, start_offset, end_offset) {
+ if (cea_db_is_hdmi_vsdb(&edid_ext[i])) {
+ /* HDMI supports at least 8 bpc */
+ info->bpc = 8;
+
+ hdmi = &edid_ext[i];
+ if (cea_db_payload_len(hdmi) < 6)
+ return false;
+
+ if (hdmi[6] & DRM_EDID_HDMI_DC_30) {
+ dc_bpc = 10;
+ DRM_DEBUG("%s: HDMI sink does deep color 30.\n",
+ connector->name);
+ }
+
+ if (hdmi[6] & DRM_EDID_HDMI_DC_36) {
+ dc_bpc = 12;
+ DRM_DEBUG("%s: HDMI sink does deep color 36.\n",
+ connector->name);
+ }
+
+ if (hdmi[6] & DRM_EDID_HDMI_DC_48) {
+ dc_bpc = 16;
+ DRM_DEBUG("%s: HDMI sink does deep color 48.\n",
+ connector->name);
+ }
+
+ if (dc_bpc > 0) {
+ DRM_DEBUG("%s: Assigning HDMI sink color depth as %d bpc.\n",
+ connector->name, dc_bpc);
+ info->bpc = dc_bpc;
+
+ /*
+ * Deep color support mandates RGB444 support for all video
+ * modes and forbids YCRCB422 support for all video modes per
+ * HDMI 1.3 spec.
+ */
+ info->color_formats = DRM_COLOR_FORMAT_RGB444;
+
+ /* YCRCB444 is optional according to spec. */
+ if (hdmi[6] & DRM_EDID_HDMI_DC_Y444) {
+ info->color_formats |= DRM_COLOR_FORMAT_YCRCB444;
+ DRM_DEBUG("%s: HDMI sink does YCRCB444 in deep color.\n",
+ connector->name);
+ }
+
+ /*
+ * Spec says that if any deep color mode is supported at all,
+ * then deep color 36 bit must be supported.
+ */
+ if (!(hdmi[6] & DRM_EDID_HDMI_DC_36)) {
+ DRM_DEBUG("%s: HDMI sink should do DC_36, but does not!\n",
+ connector->name);
+ }
+
+ return true;
+ } else {
+ DRM_DEBUG("%s: No deep color support on this HDMI sink.\n",
+ connector->name);
+ }
+ }
+ }
+
+ return false;
+}
+
+/**
* drm_add_display_info - pull display info out if present
* @edid: EDID data
* @info: display info (attached to connector)
+ * @connector: connector whose edid is used to build display info
*
* Grab any available display info and stuff it into the drm_display_info
* structure that's part of the connector. Useful for tracking bpp and
* color spaces.
*/
static void drm_add_display_info(struct edid *edid,
- struct drm_display_info *info)
+ struct drm_display_info *info,
+ struct drm_connector *connector)
{
u8 *edid_ext;
@@ -3462,6 +3569,9 @@
info->color_formats |= DRM_COLOR_FORMAT_YCRCB422;
}
+ /* HDMI deep color modes supported? Assign to info, if so */
+ drm_assign_hdmi_deep_color_info(edid, info, connector);
+
/* Only defined for 1.4 with digital displays */
if (edid->revision < 4)
return;
@@ -3491,6 +3601,9 @@
break;
}
+ DRM_DEBUG("%s: Assigning EDID-1.4 digital sink color depth as %d bpc.\n",
+ connector->name, info->bpc);
+
info->color_formats |= DRM_COLOR_FORMAT_RGB444;
if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB444)
info->color_formats |= DRM_COLOR_FORMAT_YCRCB444;
@@ -3517,7 +3630,7 @@
}
if (!drm_edid_is_valid(edid)) {
dev_warn(connector->dev->dev, "%s: EDID invalid.\n",
- drm_get_connector_name(connector));
+ connector->name);
return 0;
}
@@ -3549,11 +3662,14 @@
if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
edid_fixup_preferred(connector, quirks);
- drm_add_display_info(edid, &connector->display_info);
+ drm_add_display_info(edid, &connector->display_info, connector);
if (quirks & EDID_QUIRK_FORCE_8BPC)
connector->display_info.bpc = 8;
+ if (quirks & EDID_QUIRK_FORCE_12BPC)
+ connector->display_info.bpc = 12;
+
return num_modes;
}
EXPORT_SYMBOL(drm_add_edid_modes);
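
A short sketch of how a driver might consume the bpc value that
drm_assign_hdmi_deep_color_info() now stores in display_info; the
foo_hdmi_max_bpc() helper and the 12 bpc hardware limit are assumptions
for illustration:

	static int foo_hdmi_max_bpc(struct drm_connector *connector)
	{
		int bpc = connector->display_info.bpc;

		/* bpc is 10/12/16 when the sink advertised DC_30/36/48
		 * in its HDMI VSDB, 8 for plain HDMI sinks, and 0 when
		 * nothing could be parsed */
		if (bpc == 0)
			bpc = 8;

		return min(bpc, 12); /* assumed hardware limit */
	}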
diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c
index 6e09f61..0a235fe 100644
--- a/drivers/gpu/drm/drm_edid_load.c
+++ b/drivers/gpu/drm/drm_edid_load.c
@@ -261,7 +261,7 @@
int drm_load_edid_firmware(struct drm_connector *connector)
{
- const char *connector_name = drm_get_connector_name(connector);
+ const char *connector_name = connector->name;
char *edidname = edid_firmware, *last, *colon;
int ret;
struct edid *edid;
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index e95ed58..1ddc174 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -120,7 +120,7 @@
mode = &fb_helper_conn->cmdline_mode;
/* do something on return - turn off connector maybe */
- if (fb_get_options(drm_get_connector_name(connector), &option))
+ if (fb_get_options(connector->name, &option))
continue;
if (drm_mode_parse_command_line_for_connector(option,
@@ -142,12 +142,12 @@
}
DRM_INFO("forcing %s connector %s\n",
- drm_get_connector_name(connector), s);
+ connector->name, s);
connector->force = mode->force;
}
DRM_DEBUG_KMS("cmdline mode for connector %s %dx%d@%dHz%s%s%s\n",
- drm_get_connector_name(connector),
+ connector->name,
mode->xres, mode->yres,
mode->refresh_specified ? mode->refresh : 60,
mode->rb ? " reduced blanking" : "",
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 7583767..b565372 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -1,6 +1,5 @@
-/**
- * \file drm_irq.c
- * IRQ support
+/*
+ * drm_irq.c IRQ and vblank support
*
* \author Rickard E. (Rik) Faith <faith@valinux.com>
* \author Gareth Hughes <gareth@valinux.com>
@@ -140,33 +139,40 @@
static void vblank_disable_fn(unsigned long arg)
{
- struct drm_device *dev = (struct drm_device *)arg;
+ struct drm_vblank_crtc *vblank = (void *)arg;
+ struct drm_device *dev = vblank->dev;
unsigned long irqflags;
- int i;
+ int crtc = vblank->crtc;
if (!dev->vblank_disable_allowed)
return;
- for (i = 0; i < dev->num_crtcs; i++) {
- spin_lock_irqsave(&dev->vbl_lock, irqflags);
- if (atomic_read(&dev->vblank[i].refcount) == 0 &&
- dev->vblank[i].enabled) {
- DRM_DEBUG("disabling vblank on crtc %d\n", i);
- vblank_disable_and_save(dev, i);
- }
- spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+ spin_lock_irqsave(&dev->vbl_lock, irqflags);
+ if (atomic_read(&vblank->refcount) == 0 && vblank->enabled) {
+ DRM_DEBUG("disabling vblank on crtc %d\n", crtc);
+ vblank_disable_and_save(dev, crtc);
}
+ spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}
+/**
+ * drm_vblank_cleanup - cleanup vblank support
+ * @dev: DRM device
+ *
+ * This function cleans up any resources allocated in drm_vblank_init.
+ */
void drm_vblank_cleanup(struct drm_device *dev)
{
+ int crtc;
+
/* Bail if the driver didn't call drm_vblank_init() */
if (dev->num_crtcs == 0)
return;
- del_timer_sync(&dev->vblank_disable_timer);
-
- vblank_disable_fn((unsigned long)dev);
+ for (crtc = 0; crtc < dev->num_crtcs; crtc++) {
+ del_timer_sync(&dev->vblank[crtc].disable_timer);
+ vblank_disable_fn((unsigned long)&dev->vblank[crtc]);
+ }
kfree(dev->vblank);
@@ -174,12 +180,20 @@
}
EXPORT_SYMBOL(drm_vblank_cleanup);
+/**
+ * drm_vblank_init - initialize vblank support
+ * @dev: drm_device
+ * @num_crtcs: number of crtcs supported by @dev
+ *
+ * This function initializes vblank support for @num_crtcs display pipelines.
+ *
+ * Returns:
+ * Zero on success or a negative error code on failure.
+ */
int drm_vblank_init(struct drm_device *dev, int num_crtcs)
{
int i, ret = -ENOMEM;
- setup_timer(&dev->vblank_disable_timer, vblank_disable_fn,
- (unsigned long)dev);
spin_lock_init(&dev->vbl_lock);
spin_lock_init(&dev->vblank_time_lock);
@@ -189,8 +203,13 @@
if (!dev->vblank)
goto err;
- for (i = 0; i < num_crtcs; i++)
+ for (i = 0; i < num_crtcs; i++) {
+ dev->vblank[i].dev = dev;
+ dev->vblank[i].crtc = i;
init_waitqueue_head(&dev->vblank[i].queue);
+ setup_timer(&dev->vblank[i].disable_timer, vblank_disable_fn,
+ (unsigned long)&dev->vblank[i]);
+ }
DRM_INFO("Supports vblank timestamp caching Rev 2 (21.10.2013).\n");
@@ -234,13 +253,21 @@
}
/**
- * Install IRQ handler.
- *
- * \param dev DRM device.
+ * drm_irq_install - install IRQ handler
+ * @dev: DRM device
+ * @irq: IRQ number to install the handler for
*
* Initializes the IRQ related data. Installs the handler, calling the driver
- * \c irq_preinstall() and \c irq_postinstall() functions
- * before and after the installation.
+ * irq_preinstall() and irq_postinstall() functions before and after the
+ * installation.
+ *
+ * This is the simplified helper interface provided for drivers with no special
+ * needs. Drivers which need to install interrupt handlers for multiple
+ * interrupts must instead set drm_device->irq_enabled to signal the DRM core
+ * that vblank interrupts are available.
+ *
+ * Returns:
+ * Zero on success or a negative error code on failure.
*/
int drm_irq_install(struct drm_device *dev, int irq)
{
@@ -300,11 +327,20 @@
EXPORT_SYMBOL(drm_irq_install);
/**
- * Uninstall the IRQ handler.
+ * drm_irq_uninstall - uninstall the IRQ handler
+ * @dev: DRM device
*
- * \param dev DRM device.
+ * Calls the driver's irq_uninstall() function and unregisters the IRQ handler.
+ * This should only be called by drivers which used drm_irq_install() to set up
+ * their interrupt handler. Other drivers must only reset
+ * drm_device->irq_enabled to false.
*
- * Calls the driver's \c irq_uninstall() function, and stops the irq.
+ * Note that for kernel modesetting drivers it is a bug if this function fails.
+ * The sanity checks are only to catch buggy user modesetting drivers which call
+ * the same function through an ioctl.
+ *
+ * Returns:
+ * Zero on success or a negative error code on failure.
*/
int drm_irq_uninstall(struct drm_device *dev)
{
@@ -349,7 +385,7 @@
}
EXPORT_SYMBOL(drm_irq_uninstall);
-/**
+/*
* IRQ control ioctl.
*
* \param inode device inode.
@@ -402,15 +438,14 @@
}
/**
- * drm_calc_timestamping_constants - Calculate vblank timestamp constants
- *
- * @crtc drm_crtc whose timestamp constants should be updated.
- * @mode display mode containing the scanout timings
+ * drm_calc_timestamping_constants - calculate vblank timestamp constants
+ * @crtc: drm_crtc whose timestamp constants should be updated.
+ * @mode: display mode containing the scanout timings
*
* Calculate and store various constants which are later
* needed by vblank and swap-completion timestamping, e.g,
* by drm_calc_vbltimestamp_from_scanoutpos(). They are
- * derived from crtc's true scanout timing, so they take
+ * derived from CRTC's true scanout timing, so they take
* things like panel scaling or other adjustments into account.
*/
void drm_calc_timestamping_constants(struct drm_crtc *crtc,
@@ -455,11 +490,22 @@
EXPORT_SYMBOL(drm_calc_timestamping_constants);
/**
- * drm_calc_vbltimestamp_from_scanoutpos - helper routine for kms
- * drivers. Implements calculation of exact vblank timestamps from
- * given drm_display_mode timings and current video scanout position
- * of a crtc. This can be called from within get_vblank_timestamp()
- * implementation of a kms driver to implement the actual timestamping.
+ * drm_calc_vbltimestamp_from_scanoutpos - precise vblank timestamp helper
+ * @dev: DRM device
+ * @crtc: Which CRTC's vblank timestamp to retrieve
+ * @max_error: Desired maximum allowable error in timestamps (nanosecs)
+ * On return contains true maximum error of timestamp
+ * @vblank_time: Pointer to struct timeval which should receive the timestamp
+ * @flags: Flags to pass to driver:
+ * 0 = Default,
+ * DRM_CALLED_FROM_VBLIRQ = If function is called from vbl IRQ handler
+ * @refcrtc: CRTC which defines scanout timing
+ * @mode: mode which defines the scanout timings
+ *
+ * Implements calculation of exact vblank timestamps from given drm_display_mode
+ * timings and current video scanout position of a CRTC. This can be called from
+ * within get_vblank_timestamp() implementation of a kms driver to implement the
+ * actual timestamping.
*
* Should return timestamps conforming to the OML_sync_control OpenML
* extension specification. The timestamp corresponds to the end of
@@ -474,21 +520,11 @@
* returns as no operation if a doublescan or interlaced video mode is
* active. Higher level code is expected to handle this.
*
- * @dev: DRM device.
- * @crtc: Which crtc's vblank timestamp to retrieve.
- * @max_error: Desired maximum allowable error in timestamps (nanosecs).
- * On return contains true maximum error of timestamp.
- * @vblank_time: Pointer to struct timeval which should receive the timestamp.
- * @flags: Flags to pass to driver:
- * 0 = Default.
- * DRM_CALLED_FROM_VBLIRQ = If function is called from vbl irq handler.
- * @refcrtc: drm_crtc* of crtc which defines scanout timing.
- * @mode: mode which defines the scanout timings
- *
- * Returns negative value on error, failure or if not supported in current
+ * Returns:
+ * Negative value on error, failure or if not supported in current
* video mode:
*
- * -EINVAL - Invalid crtc.
+ * -EINVAL - Invalid CRTC.
* -EAGAIN - Temporary unavailable, e.g., called before initial modeset.
* -ENOTSUPP - Function not supported in current display mode.
* -EIO - Failed, e.g., due to failed scanout position query.
@@ -637,23 +673,23 @@
/**
* drm_get_last_vbltimestamp - retrieve raw timestamp for the most recent
- * vblank interval.
- *
+ * vblank interval
* @dev: DRM device
- * @crtc: which crtc's vblank timestamp to retrieve
+ * @crtc: which CRTC's vblank timestamp to retrieve
* @tvblank: Pointer to target struct timeval which should receive the timestamp
* @flags: Flags to pass to driver:
- * 0 = Default.
- * DRM_CALLED_FROM_VBLIRQ = If function is called from vbl irq handler.
+ * 0 = Default,
+ * DRM_CALLED_FROM_VBLIRQ = If function is called from vbl IRQ handler
*
* Fetches the system timestamp corresponding to the time of the most recent
- * vblank interval on specified crtc. May call into kms-driver to
+ * vblank interval on specified CRTC. May call into kms-driver to
* compute the timestamp with a high-precision GPU specific method.
*
* Returns zero if timestamp originates from uncorrected do_gettimeofday()
* call, i.e., it isn't very precisely locked to the true vblank.
*
- * Returns non-zero if timestamp is considered to be very precise.
+ * Returns:
+ * Non-zero if timestamp is considered to be very precise, zero otherwise.
*/
u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
struct timeval *tvblank, unsigned flags)
@@ -688,6 +724,9 @@
* Fetches the "cooked" vblank count value that represents the number of
* vblank events since the system was booted, including lost events due to
* modesetting activity.
+ *
+ * Returns:
+ * The software vblank counter.
*/
u32 drm_vblank_count(struct drm_device *dev, int crtc)
{
@@ -706,8 +745,7 @@
* Fetches the "cooked" vblank count value that represents the number of
* vblank events since the system was booted, including lost events due to
* modesetting activity. Returns corresponding system timestamp of the time
- * of the vblank interval that corresponds to the current value vblank counter
- * value.
+ * of the vblank interval that corresponds to the current vblank counter value.
*/
u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
struct timeval *vblanktime)
@@ -836,6 +874,42 @@
}
/**
+ * drm_vblank_enable - enable the vblank interrupt on a CRTC
+ * @dev: DRM device
+ * @crtc: CRTC in question
+ */
+static int drm_vblank_enable(struct drm_device *dev, int crtc)
+{
+ int ret = 0;
+
+ assert_spin_locked(&dev->vbl_lock);
+
+ spin_lock(&dev->vblank_time_lock);
+
+ if (!dev->vblank[crtc].enabled) {
+ /*
+ * Enable vblank irqs under vblank_time_lock protection.
+ * All vblank count & timestamp updates are held off
+ * until we are done reinitializing master counter and
+ * timestamps. Filtercode in drm_handle_vblank() will
+ * prevent double-accounting of same vblank interval.
+ */
+ ret = dev->driver->enable_vblank(dev, crtc);
+ DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", crtc, ret);
+ if (ret)
+ atomic_dec(&dev->vblank[crtc].refcount);
+ else {
+ dev->vblank[crtc].enabled = true;
+ drm_update_vblank_count(dev, crtc);
+ }
+ }
+
+ spin_unlock(&dev->vblank_time_lock);
+
+ return ret;
+}
+
+/**
* drm_vblank_get - get a reference count on vblank events
* @dev: DRM device
* @crtc: which CRTC to own
@@ -843,36 +917,20 @@
* Acquire a reference count on vblank events to avoid having them disabled
* while in use.
*
- * RETURNS
+ * This is the legacy version of drm_crtc_vblank_get().
+ *
+ * Returns:
* Zero on success, nonzero on failure.
*/
int drm_vblank_get(struct drm_device *dev, int crtc)
{
- unsigned long irqflags, irqflags2;
+ unsigned long irqflags;
int ret = 0;
spin_lock_irqsave(&dev->vbl_lock, irqflags);
/* Going from 0->1 means we have to enable interrupts again */
if (atomic_add_return(1, &dev->vblank[crtc].refcount) == 1) {
- spin_lock_irqsave(&dev->vblank_time_lock, irqflags2);
- if (!dev->vblank[crtc].enabled) {
- /* Enable vblank irqs under vblank_time_lock protection.
- * All vblank count & timestamp updates are held off
- * until we are done reinitializing master counter and
- * timestamps. Filtercode in drm_handle_vblank() will
- * prevent double-accounting of same vblank interval.
- */
- ret = dev->driver->enable_vblank(dev, crtc);
- DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n",
- crtc, ret);
- if (ret)
- atomic_dec(&dev->vblank[crtc].refcount);
- else {
- dev->vblank[crtc].enabled = true;
- drm_update_vblank_count(dev, crtc);
- }
- }
- spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags2);
+ ret = drm_vblank_enable(dev, crtc);
} else {
if (!dev->vblank[crtc].enabled) {
atomic_dec(&dev->vblank[crtc].refcount);
@@ -886,12 +944,32 @@
EXPORT_SYMBOL(drm_vblank_get);
/**
+ * drm_crtc_vblank_get - get a reference count on vblank events
+ * @crtc: which CRTC to own
+ *
+ * Acquire a reference count on vblank events to avoid having them disabled
+ * while in use.
+ *
+ * This is the native kms version of drm_vblank_get().
+ *
+ * Returns:
+ * Zero on success, nonzero on failure.
+ */
+int drm_crtc_vblank_get(struct drm_crtc *crtc)
+{
+ return drm_vblank_get(crtc->dev, drm_crtc_index(crtc));
+}
+EXPORT_SYMBOL(drm_crtc_vblank_get);
+
+/**
* drm_vblank_put - give up ownership of vblank events
* @dev: DRM device
* @crtc: which counter to give up
*
* Release ownership of a given vblank counter, turning off interrupts
* if possible. Disable interrupts after drm_vblank_offdelay milliseconds.
+ *
+ * This is the legacy version of drm_crtc_vblank_put().
*/
void drm_vblank_put(struct drm_device *dev, int crtc)
{
@@ -900,17 +978,39 @@
/* Last user schedules interrupt disable */
if (atomic_dec_and_test(&dev->vblank[crtc].refcount) &&
(drm_vblank_offdelay > 0))
- mod_timer(&dev->vblank_disable_timer,
+ mod_timer(&dev->vblank[crtc].disable_timer,
jiffies + ((drm_vblank_offdelay * HZ)/1000));
}
EXPORT_SYMBOL(drm_vblank_put);
/**
+ * drm_crtc_vblank_put - give up ownership of vblank events
+ * @crtc: which counter to give up
+ *
+ * Release ownership of a given vblank counter, turning off interrupts
+ * if possible. Disable interrupts after drm_vblank_offdelay milliseconds.
+ *
+ * This is the native kms version of drm_vblank_put().
+ */
+void drm_crtc_vblank_put(struct drm_crtc *crtc)
+{
+ drm_vblank_put(crtc->dev, drm_crtc_index(crtc));
+}
+EXPORT_SYMBOL(drm_crtc_vblank_put);
+
+/**
* drm_vblank_off - disable vblank events on a CRTC
* @dev: DRM device
* @crtc: CRTC in question
*
- * Caller must hold event lock.
+ * Drivers can use this function to shut down the vblank interrupt handling when
+ * disabling a crtc. This function ensures that the latest vblank frame count is
+ * stored so that drm_vblank_on() can restore it again.
+ *
+ * Drivers must use this function when the hardware vblank counter can get
+ * reset, e.g. when suspending.
+ *
+ * This is the legacy version of drm_crtc_vblank_off().
*/
void drm_vblank_off(struct drm_device *dev, int crtc)
{
@@ -944,12 +1044,87 @@
EXPORT_SYMBOL(drm_vblank_off);
/**
+ * drm_crtc_vblank_off - disable vblank events on a CRTC
+ * @crtc: CRTC in question
+ *
+ * Drivers can use this function to shut down the vblank interrupt handling when
+ * disabling a crtc. This function ensures that the latest vblank frame count is
+ * stored so that drm_vblank_on() can restore it again.
+ *
+ * Drivers must use this function when the hardware vblank counter can get
+ * reset, e.g. when suspending.
+ *
+ * This is the native kms version of drm_vblank_off().
+ */
+void drm_crtc_vblank_off(struct drm_crtc *crtc)
+{
+ drm_vblank_off(crtc->dev, drm_crtc_index(crtc));
+}
+EXPORT_SYMBOL(drm_crtc_vblank_off);
+
+/**
+ * drm_vblank_on - enable vblank events on a CRTC
+ * @dev: DRM device
+ * @crtc: CRTC in question
+ *
+ * This function restores the vblank interrupt state captured with
+ * drm_vblank_off() again. Note that calls to drm_vblank_on() and
+ * drm_vblank_off() can be unbalanced and so can also be unconditionally called
+ * in driver load code to reflect the current hardware state of the crtc.
+ *
+ * This is the legacy version of drm_crtc_vblank_on().
+ */
+void drm_vblank_on(struct drm_device *dev, int crtc)
+{
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev->vbl_lock, irqflags);
+ /* re-enable interrupts if there are users left */
+ if (atomic_read(&dev->vblank[crtc].refcount) != 0)
+ WARN_ON(drm_vblank_enable(dev, crtc));
+ spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+}
+EXPORT_SYMBOL(drm_vblank_on);
+
+/**
+ * drm_crtc_vblank_on - enable vblank events on a CRTC
+ * @crtc: CRTC in question
+ *
+ * This function restores the vblank interrupt state captured with
+ * drm_vblank_off() again. Note that calls to drm_vblank_on() and
+ * drm_vblank_off() can be unbalanced and so can also be unconditionally called
+ * in driver load code to reflect the current hardware state of the crtc.
+ *
+ * This is the native kms version of drm_vblank_on().
+ */
+void drm_crtc_vblank_on(struct drm_crtc *crtc)
+{
+ drm_vblank_on(crtc->dev, drm_crtc_index(crtc));
+}
+EXPORT_SYMBOL(drm_crtc_vblank_on);
+
+/**
* drm_vblank_pre_modeset - account for vblanks across mode sets
* @dev: DRM device
* @crtc: CRTC in question
*
* Account for vblank events across mode setting events, which will likely
* reset the hardware frame counter.
+ *
+ * This is done by grabbing a temporary vblank reference to ensure that the
+ * vblank interrupt keeps running across the modeset sequence. With this the
+ * software-side vblank frame counting will ensure that there are no jumps or
+ * discontinuities.
+ *
+ * Unfortunately this approach is racy and also doesn't work when the vblank
+ * interrupt stops running, e.g. across system suspend/resume. It is therefore
+ * highly recommended that drivers use the newer drm_vblank_off() and
+ * drm_vblank_on() instead. drm_vblank_pre_modeset() only works correctly when
+ * using "cooked" software vblank frame counters and not relying on any hardware
+ * counters.
+ *
+ * Drivers must call drm_vblank_post_modeset() when re-enabling the same crtc
+ * again.
*/
void drm_vblank_pre_modeset(struct drm_device *dev, int crtc)
{
@@ -971,6 +1146,14 @@
}
EXPORT_SYMBOL(drm_vblank_pre_modeset);
+/**
+ * drm_vblank_post_modeset - undo drm_vblank_pre_modeset changes
+ * @dev: DRM device
+ * @crtc: CRTC in question
+ *
+ * This function again drops the temporary vblank reference acquired in
+ * drm_vblank_pre_modeset.
+ */
void drm_vblank_post_modeset(struct drm_device *dev, int crtc)
{
unsigned long irqflags;
@@ -992,7 +1175,7 @@
}
EXPORT_SYMBOL(drm_vblank_post_modeset);
-/**
+/*
* drm_modeset_ctl - handle vblank event counter changes across mode switch
* @DRM_IOCTL_ARGS: standard ioctl arguments
*
@@ -1105,7 +1288,7 @@
return ret;
}
-/**
+/*
* Wait for VBLANK.
*
* \param inode device inode.
@@ -1116,7 +1299,7 @@
*
* This function enables the vblank interrupt on the pipe requested, then
* sleeps waiting for the requested sequence number to occur, and drops
- * the vblank interrupt refcount afterwards. (vblank irq disable follows that
+ * the vblank interrupt refcount afterwards. (vblank IRQ disable follows that
* after a timeout with no further vblank waits scheduled).
*/
int drm_wait_vblank(struct drm_device *dev, void *data,
@@ -1187,6 +1370,7 @@
DRM_WAIT_ON(ret, dev->vblank[crtc].queue, 3 * HZ,
(((drm_vblank_count(dev, crtc) -
vblwait->request.sequence) <= (1 << 23)) ||
+ !dev->vblank[crtc].enabled ||
!dev->irq_enabled));
if (ret != -EINTR) {
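
The intended pairing of the new drm_crtc_vblank_off()/drm_crtc_vblank_on()
helpers looks roughly like this in a driver's crtc hooks; the foo_crtc_*
hooks and the power helpers are hypothetical:

	static void foo_crtc_disable(struct drm_crtc *crtc)
	{
		/* latch the frame count and fail further vblank waits
		 * before the hardware counter gets reset */
		drm_crtc_vblank_off(crtc);
		foo_power_down_pipe(crtc);
	}

	static void foo_crtc_enable(struct drm_crtc *crtc)
	{
		foo_power_up_pipe(crtc);
		/* restore the saved count; re-arms the interrupt if a
		 * drm_crtc_vblank_get() reference is still held */
		drm_crtc_vblank_on(crtc);
	}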
diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c
index d966afa..458d9bf 100644
--- a/drivers/gpu/drm/drm_plane_helper.c
+++ b/drivers/gpu/drm/drm_plane_helper.c
@@ -54,6 +54,13 @@
struct drm_connector *connector;
int count = 0;
+ /*
+ * Note: Once we change the plane hooks to more fine-grained locking we
+ * need to grab the connection_mutex here to be able to make these
+ * checks.
+ */
+ WARN_ON(!mutex_is_locked(&dev->mode_config.connection_mutex));
+
list_for_each_entry(connector, &dev->mode_config.connector_list, head)
if (connector->encoder && connector->encoder->crtc == crtc) {
if (connector_list != NULL && count < num_connectors)
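
Illustrative only: since drm_modeset_lock_all() now also takes
connection_mutex, connector->encoder links can be walked safely inside
such a section (the foo_ helper is hypothetical):

	static void foo_dump_routing(struct drm_device *dev)
	{
		struct drm_connector *connector;

		drm_modeset_lock_all(dev);
		list_for_each_entry(connector,
				    &dev->mode_config.connector_list, head)
			DRM_DEBUG_KMS("[CONNECTOR:%d:%s] -> encoder %p\n",
				      connector->base.id, connector->name,
				      connector->encoder);
		drm_modeset_unlock_all(dev);
	}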
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index 79f07f2..d22676b 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -96,7 +96,7 @@
WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
- drm_get_connector_name(connector));
+ connector->name);
/* set all modes to the unverified state */
list_for_each_entry(mode, &connector->modes, head)
mode->status = MODE_UNVERIFIED;
@@ -120,7 +120,7 @@
if (connector->status == connector_status_disconnected) {
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n",
- connector->base.id, drm_get_connector_name(connector));
+ connector->base.id, connector->name);
drm_mode_connector_update_edid_property(connector, NULL);
verbose_prune = false;
goto prune;
@@ -168,7 +168,7 @@
drm_mode_sort(&connector->modes);
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] probed modes :\n", connector->base.id,
- drm_get_connector_name(connector));
+ connector->name);
list_for_each_entry(mode, &connector->modes, head) {
drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
drm_mode_debug_printmodeline(mode);
@@ -286,7 +286,7 @@
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] "
"status updated from %s to %s\n",
connector->base.id,
- drm_get_connector_name(connector),
+ connector->name,
old, new);
changed = true;
@@ -431,7 +431,7 @@
connector->status = connector->funcs->detect(connector, false);
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
connector->base.id,
- drm_get_connector_name(connector),
+ connector->name,
drm_get_connector_status_name(old_status),
drm_get_connector_status_name(connector->status));
if (old_status != connector->status)
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index c22c309..369b262 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -380,9 +380,9 @@
connector->kdev = device_create(drm_class, dev->primary->kdev,
0, connector, "card%d-%s",
- dev->primary->index, drm_get_connector_name(connector));
+ dev->primary->index, connector->name);
DRM_DEBUG("adding \"%s\" to sysfs\n",
- drm_get_connector_name(connector));
+ connector->name);
if (IS_ERR(connector->kdev)) {
DRM_ERROR("failed to register connector device: %ld\n", PTR_ERR(connector->kdev));
@@ -460,7 +460,7 @@
if (!connector->kdev)
return;
DRM_DEBUG("removing \"%s\" from sysfs\n",
- drm_get_connector_name(connector));
+ connector->name);
for (i = 0; i < ARRAY_SIZE(connector_attrs); i++)
device_remove_file(connector->kdev, &connector_attrs[i]);
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index e4e3c01..437e182 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -5,6 +5,7 @@
depends on (AGP || AGP=n)
select INTEL_GTT
select AGP_INTEL if AGP
+ select INTERVAL_TREE
# we need shmfs for the swappable backing store, and in particular
# the shmem_readpage() which depends upon tmpfs
select SHMEM
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index b1445b7..7b2f3be 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -18,6 +18,7 @@
# GEM code
i915-y += i915_cmd_parser.o \
i915_gem_context.o \
+ i915_gem_render_state.o \
i915_gem_debug.o \
i915_gem_dmabuf.o \
i915_gem_evict.o \
@@ -26,12 +27,18 @@
i915_gem.o \
i915_gem_stolen.o \
i915_gem_tiling.o \
+ i915_gem_userptr.o \
i915_gpu_error.o \
i915_irq.o \
i915_trace_points.o \
intel_ringbuffer.o \
intel_uncore.o
+# autogenerated null render state
+i915-y += intel_renderstate_gen6.o \
+ intel_renderstate_gen7.o \
+ intel_renderstate_gen8.o
+
# modesetting core code
i915-y += intel_bios.o \
intel_display.o \
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 69d34e4..9d79543 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -498,16 +498,18 @@
return 0;
}
-static bool validate_cmds_sorted(struct intel_ring_buffer *ring)
+static bool validate_cmds_sorted(struct intel_engine_cs *ring,
+ const struct drm_i915_cmd_table *cmd_tables,
+ int cmd_table_count)
{
int i;
bool ret = true;
- if (!ring->cmd_tables || ring->cmd_table_count == 0)
+ if (!cmd_tables || cmd_table_count == 0)
return true;
- for (i = 0; i < ring->cmd_table_count; i++) {
- const struct drm_i915_cmd_table *table = &ring->cmd_tables[i];
+ for (i = 0; i < cmd_table_count; i++) {
+ const struct drm_i915_cmd_table *table = &cmd_tables[i];
u32 previous = 0;
int j;
@@ -550,35 +552,103 @@
return ret;
}
-static bool validate_regs_sorted(struct intel_ring_buffer *ring)
+static bool validate_regs_sorted(struct intel_engine_cs *ring)
{
return check_sorted(ring->id, ring->reg_table, ring->reg_count) &&
check_sorted(ring->id, ring->master_reg_table,
ring->master_reg_count);
}
+struct cmd_node {
+ const struct drm_i915_cmd_descriptor *desc;
+ struct hlist_node node;
+};
+
+/*
+ * Different command ranges have different numbers of bits for the opcode. For
+ * example, MI commands use bits 31:23 while 3D commands use bits 31:16. The
+ * problem is that, for example, MI commands use bits 22:16 for other fields
+ * such as GGTT vs PPGTT bits. If we include those bits in the mask then when
+ * we mask a command from a batch it could hash to the wrong bucket due to
+ * non-opcode bits being set. But if we don't include those bits, some 3D
+ * commands may hash to the same bucket due to not including opcode bits that
+ * make the command unique. For now, we will risk hashing to the same bucket.
+ *
+ * If we attempt to generate a perfect hash, we should be able to look at bits
+ * 31:29 of a command from a batch buffer and use the full mask for that
+ * client. The existing INSTR_CLIENT_MASK/SHIFT defines can be used for this.
+ */
+#define CMD_HASH_MASK STD_MI_OPCODE_MASK
+
+static int init_hash_table(struct intel_engine_cs *ring,
+ const struct drm_i915_cmd_table *cmd_tables,
+ int cmd_table_count)
+{
+ int i, j;
+
+ hash_init(ring->cmd_hash);
+
+ for (i = 0; i < cmd_table_count; i++) {
+ const struct drm_i915_cmd_table *table = &cmd_tables[i];
+
+ for (j = 0; j < table->count; j++) {
+ const struct drm_i915_cmd_descriptor *desc =
+ &table->table[j];
+ struct cmd_node *desc_node =
+ kmalloc(sizeof(*desc_node), GFP_KERNEL);
+
+ if (!desc_node)
+ return -ENOMEM;
+
+ desc_node->desc = desc;
+ hash_add(ring->cmd_hash, &desc_node->node,
+ desc->cmd.value & CMD_HASH_MASK);
+ }
+ }
+
+ return 0;
+}
+
+static void fini_hash_table(struct intel_engine_cs *ring)
+{
+ struct hlist_node *tmp;
+ struct cmd_node *desc_node;
+ int i;
+
+ hash_for_each_safe(ring->cmd_hash, i, tmp, desc_node, node) {
+ hash_del(&desc_node->node);
+ kfree(desc_node);
+ }
+}
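The trade-off described in the comment above boils down to hashing only the
masked opcode bits, so non-opcode bits can never steer a command into the
wrong bucket, at the price of occasional collisions within a bucket. A small
sketch, assuming STD_MI_OPCODE_MASK covers bits 31:23 (the kernel feeds the
masked value through hash_32(); a plain fold is used here for brevity):

    #include <stdint.h>
    #include <stdio.h>

    #define STD_MI_OPCODE_MASK 0xff800000u /* assumed: opcode in bits 31:23 */
    #define CMD_HASH_MASK      STD_MI_OPCODE_MASK
    #define NBUCKETS           64u

    static unsigned bucket_for(uint32_t cmd_header)
    {
        /* fold the masked opcode down before bucketing */
        return ((cmd_header & CMD_HASH_MASK) >> 23) % NBUCKETS;
    }

    int main(void)
    {
        /* two MI headers differing only in non-opcode bits share a bucket */
        printf("%u %u\n", bucket_for(0x11800000u), bucket_for(0x11812345u));
        return 0;
    }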
+
/**
* i915_cmd_parser_init_ring() - set cmd parser related fields for a ringbuffer
* @ring: the ringbuffer to initialize
*
* Optionally initializes fields related to batch buffer command parsing in the
- * struct intel_ring_buffer based on whether the platform requires software
+ * struct intel_engine_cs based on whether the platform requires software
* command parsing.
+ *
+ * Return: non-zero if initialization fails
*/
-void i915_cmd_parser_init_ring(struct intel_ring_buffer *ring)
+int i915_cmd_parser_init_ring(struct intel_engine_cs *ring)
{
+ const struct drm_i915_cmd_table *cmd_tables;
+ int cmd_table_count;
+ int ret;
+
if (!IS_GEN7(ring->dev))
- return;
+ return 0;
switch (ring->id) {
case RCS:
if (IS_HASWELL(ring->dev)) {
- ring->cmd_tables = hsw_render_ring_cmds;
- ring->cmd_table_count =
+ cmd_tables = hsw_render_ring_cmds;
+ cmd_table_count =
ARRAY_SIZE(hsw_render_ring_cmds);
} else {
- ring->cmd_tables = gen7_render_cmds;
- ring->cmd_table_count = ARRAY_SIZE(gen7_render_cmds);
+ cmd_tables = gen7_render_cmds;
+ cmd_table_count = ARRAY_SIZE(gen7_render_cmds);
}
ring->reg_table = gen7_render_regs;
@@ -595,17 +665,17 @@
ring->get_cmd_length_mask = gen7_render_get_cmd_length_mask;
break;
case VCS:
- ring->cmd_tables = gen7_video_cmds;
- ring->cmd_table_count = ARRAY_SIZE(gen7_video_cmds);
+ cmd_tables = gen7_video_cmds;
+ cmd_table_count = ARRAY_SIZE(gen7_video_cmds);
ring->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
break;
case BCS:
if (IS_HASWELL(ring->dev)) {
- ring->cmd_tables = hsw_blt_ring_cmds;
- ring->cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmds);
+ cmd_tables = hsw_blt_ring_cmds;
+ cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmds);
} else {
- ring->cmd_tables = gen7_blt_cmds;
- ring->cmd_table_count = ARRAY_SIZE(gen7_blt_cmds);
+ cmd_tables = gen7_blt_cmds;
+ cmd_table_count = ARRAY_SIZE(gen7_blt_cmds);
}
ring->reg_table = gen7_blt_regs;
@@ -622,8 +692,8 @@
ring->get_cmd_length_mask = gen7_blt_get_cmd_length_mask;
break;
case VECS:
- ring->cmd_tables = hsw_vebox_cmds;
- ring->cmd_table_count = ARRAY_SIZE(hsw_vebox_cmds);
+ cmd_tables = hsw_vebox_cmds;
+ cmd_table_count = ARRAY_SIZE(hsw_vebox_cmds);
/* VECS can use the same length_mask function as VCS */
ring->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
break;
@@ -633,18 +703,45 @@
BUG();
}
- BUG_ON(!validate_cmds_sorted(ring));
+ BUG_ON(!validate_cmds_sorted(ring, cmd_tables, cmd_table_count));
BUG_ON(!validate_regs_sorted(ring));
+
+ ret = init_hash_table(ring, cmd_tables, cmd_table_count);
+ if (ret) {
+ DRM_ERROR("CMD: cmd_parser_init failed!\n");
+ fini_hash_table(ring);
+ return ret;
+ }
+
+ ring->needs_cmd_parser = true;
+
+ return 0;
+}
+
+/**
+ * i915_cmd_parser_fini_ring() - clean up cmd parser related fields
+ * @ring: the ringbuffer to clean up
+ *
+ * Releases any resources related to command parsing that may have been
+ * initialized for the specified ring.
+ */
+void i915_cmd_parser_fini_ring(struct intel_engine_cs *ring)
+{
+ if (!ring->needs_cmd_parser)
+ return;
+
+ fini_hash_table(ring);
}
static const struct drm_i915_cmd_descriptor*
-find_cmd_in_table(const struct drm_i915_cmd_table *table,
+find_cmd_in_table(struct intel_engine_cs *ring,
u32 cmd_header)
{
- int i;
+ struct cmd_node *desc_node;
- for (i = 0; i < table->count; i++) {
- const struct drm_i915_cmd_descriptor *desc = &table->table[i];
+ hash_for_each_possible(ring->cmd_hash, desc_node, node,
+ cmd_header & CMD_HASH_MASK) {
+ const struct drm_i915_cmd_descriptor *desc = desc_node->desc;
u32 masked_cmd = desc->cmd.mask & cmd_header;
u32 masked_value = desc->cmd.value & desc->cmd.mask;
@@ -664,20 +761,16 @@
* ring's default length encoding and returns default_desc.
*/
static const struct drm_i915_cmd_descriptor*
-find_cmd(struct intel_ring_buffer *ring,
+find_cmd(struct intel_engine_cs *ring,
u32 cmd_header,
struct drm_i915_cmd_descriptor *default_desc)
{
+ const struct drm_i915_cmd_descriptor *desc;
u32 mask;
- int i;
- for (i = 0; i < ring->cmd_table_count; i++) {
- const struct drm_i915_cmd_descriptor *desc;
-
- desc = find_cmd_in_table(&ring->cmd_tables[i], cmd_header);
- if (desc)
- return desc;
- }
+ desc = find_cmd_in_table(ring, cmd_header);
+ if (desc)
+ return desc;
mask = ring->get_cmd_length_mask(cmd_header);
if (!mask)
@@ -744,12 +837,11 @@
*
* Return: true if the ring requires software command parsing
*/
-bool i915_needs_cmd_parser(struct intel_ring_buffer *ring)
+bool i915_needs_cmd_parser(struct intel_engine_cs *ring)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
- /* No command tables indicates a platform without parsing */
- if (!ring->cmd_tables)
+ if (!ring->needs_cmd_parser)
return false;
/*
@@ -763,7 +855,7 @@
return (i915.enable_cmd_parser == 1);
}
-static bool check_cmd(const struct intel_ring_buffer *ring,
+static bool check_cmd(const struct intel_engine_cs *ring,
const struct drm_i915_cmd_descriptor *desc,
const u32 *cmd,
const bool is_master,
@@ -865,7 +957,7 @@
*
* Return: non-zero if the parser finds violations or otherwise fails
*/
-int i915_parse_cmds(struct intel_ring_buffer *ring,
+int i915_parse_cmds(struct intel_engine_cs *ring,
struct drm_i915_gem_object *batch_obj,
u32 batch_start_offset,
bool is_master)
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 18b3565..169fc2d 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -79,7 +79,7 @@
static int i915_capabilities(struct seq_file *m, void *data)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
const struct intel_device_info *info = INTEL_INFO(dev);
@@ -172,7 +172,7 @@
seq_printf(m, " (%s)", obj->ring->name);
}
-static void describe_ctx(struct seq_file *m, struct i915_hw_context *ctx)
+static void describe_ctx(struct seq_file *m, struct intel_context *ctx)
{
seq_putc(m, ctx->is_initialized ? 'I' : 'i');
seq_putc(m, ctx->remap_slice ? 'R' : 'r');
@@ -181,7 +181,7 @@
static int i915_gem_object_list_info(struct seq_file *m, void *data)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = m->private;
uintptr_t list = (uintptr_t) node->info_ent->data;
struct list_head *head;
struct drm_device *dev = node->minor->dev;
@@ -239,7 +239,7 @@
static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
@@ -371,7 +371,7 @@
static int i915_gem_object_info(struct seq_file *m, void* data)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 count, mappable_count, purgeable_count;
@@ -474,7 +474,7 @@
static int i915_gem_gtt_info(struct seq_file *m, void *data)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
uintptr_t list = (uintptr_t) node->info_ent->data;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -509,12 +509,12 @@
static int i915_gem_pageflip_info(struct seq_file *m, void *data)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
unsigned long flags;
struct intel_crtc *crtc;
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
+ for_each_intel_crtc(dev, crtc) {
const char pipe = pipe_name(crtc->pipe);
const char plane = plane_name(crtc->plane);
struct intel_unpin_work *work;
@@ -559,10 +559,10 @@
static int i915_gem_request_info(struct seq_file *m, void *data)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
+ struct intel_engine_cs *ring;
struct drm_i915_gem_request *gem_request;
int ret, count, i;
@@ -594,7 +594,7 @@
}
static void i915_ring_seqno_info(struct seq_file *m,
- struct intel_ring_buffer *ring)
+ struct intel_engine_cs *ring)
{
if (ring->get_seqno) {
seq_printf(m, "Current sequence (%s): %u\n",
@@ -604,10 +604,10 @@
static int i915_gem_seqno_info(struct seq_file *m, void *data)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
+ struct intel_engine_cs *ring;
int ret, i;
ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -627,10 +627,10 @@
static int i915_interrupt_info(struct seq_file *m, void *data)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
+ struct intel_engine_cs *ring;
int ret, i, pipe;
ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -638,7 +638,47 @@
return ret;
intel_runtime_pm_get(dev_priv);
- if (INTEL_INFO(dev)->gen >= 8) {
+ if (IS_CHERRYVIEW(dev)) {
+ int i;
+ seq_printf(m, "Master Interrupt Control:\t%08x\n",
+ I915_READ(GEN8_MASTER_IRQ));
+
+ seq_printf(m, "Display IER:\t%08x\n",
+ I915_READ(VLV_IER));
+ seq_printf(m, "Display IIR:\t%08x\n",
+ I915_READ(VLV_IIR));
+ seq_printf(m, "Display IIR_RW:\t%08x\n",
+ I915_READ(VLV_IIR_RW));
+ seq_printf(m, "Display IMR:\t%08x\n",
+ I915_READ(VLV_IMR));
+ for_each_pipe(pipe)
+ seq_printf(m, "Pipe %c stat:\t%08x\n",
+ pipe_name(pipe),
+ I915_READ(PIPESTAT(pipe)));
+
+ seq_printf(m, "Port hotplug:\t%08x\n",
+ I915_READ(PORT_HOTPLUG_EN));
+ seq_printf(m, "DPFLIPSTAT:\t%08x\n",
+ I915_READ(VLV_DPFLIPSTAT));
+ seq_printf(m, "DPINVGTT:\t%08x\n",
+ I915_READ(DPINVGTT));
+
+ for (i = 0; i < 4; i++) {
+ seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
+ i, I915_READ(GEN8_GT_IMR(i)));
+ seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
+ i, I915_READ(GEN8_GT_IIR(i)));
+ seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
+ i, I915_READ(GEN8_GT_IER(i)));
+ }
+
+ seq_printf(m, "PCU interrupt mask:\t%08x\n",
+ I915_READ(GEN8_PCU_IMR));
+ seq_printf(m, "PCU interrupt identity:\t%08x\n",
+ I915_READ(GEN8_PCU_IIR));
+ seq_printf(m, "PCU interrupt enable:\t%08x\n",
+ I915_READ(GEN8_PCU_IER));
+ } else if (INTEL_INFO(dev)->gen >= 8) {
seq_printf(m, "Master Interrupt Control:\t%08x\n",
I915_READ(GEN8_MASTER_IRQ));
@@ -768,7 +808,7 @@
static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int i, ret;
@@ -797,10 +837,10 @@
static int i915_hws_info(struct seq_file *m, void *data)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
+ struct intel_engine_cs *ring;
const u32 *hws;
int i;
@@ -945,7 +985,7 @@
static int i915_rstdby_delays(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u16 crstanddelay;
@@ -968,7 +1008,7 @@
static int i915_frequency_info(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret = 0;
@@ -1108,7 +1148,7 @@
static int i915_delayfreq_table(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 delayfreq;
@@ -1139,7 +1179,7 @@
static int i915_inttoext_table(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 inttoext;
@@ -1163,7 +1203,7 @@
static int ironlake_drpc_info(struct seq_file *m)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 rgvmodectl, rstdbyctl;
@@ -1233,7 +1273,7 @@
static int vlv_drpc_info(struct seq_file *m)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 rpmodectl1, rcctl1;
@@ -1286,7 +1326,7 @@
static int gen6_drpc_info(struct seq_file *m)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
@@ -1385,7 +1425,7 @@
static int i915_drpc_info(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
if (IS_VALLEYVIEW(dev))
@@ -1398,7 +1438,7 @@
static int i915_fbc_status(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1460,7 +1500,7 @@
static int i915_ips_status(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1483,7 +1523,7 @@
static int i915_sr_status(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
bool sr_enabled = false;
@@ -1509,7 +1549,7 @@
static int i915_emon_status(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long temp, chipset, gfx;
@@ -1537,7 +1577,7 @@
static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret = 0;
@@ -1580,7 +1620,7 @@
static int i915_gfxec(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
@@ -1600,7 +1640,7 @@
static int i915_opregion(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_opregion *opregion = &dev_priv->opregion;
@@ -1628,7 +1668,7 @@
static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct intel_fbdev *ifbdev = NULL;
struct intel_framebuffer *fb;
@@ -1674,11 +1714,11 @@
static int i915_context_status(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
- struct i915_hw_context *ctx;
+ struct intel_engine_cs *ring;
+ struct intel_context *ctx;
int ret, i;
ret = mutex_lock_interruptible(&dev->mode_config.mutex);
@@ -1718,7 +1758,7 @@
static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned forcewake_count = 0, fw_rendercount = 0, fw_mediacount = 0;
@@ -1766,7 +1806,7 @@
static int i915_swizzle_info(struct seq_file *m, void *data)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
@@ -1814,10 +1854,14 @@
static int per_file_ctx(int id, void *ptr, void *data)
{
- struct i915_hw_context *ctx = ptr;
+ struct intel_context *ctx = ptr;
struct seq_file *m = data;
struct i915_hw_ppgtt *ppgtt = ctx_to_ppgtt(ctx);
+ if (i915_gem_context_is_default(ctx))
+ seq_puts(m, " default context:\n");
+ else
+ seq_printf(m, " context %d:\n", ctx->id);
ppgtt->debug_dump(ppgtt, m);
return 0;
@@ -1826,7 +1870,7 @@
static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
+ struct intel_engine_cs *ring;
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
int unused, i;
@@ -1850,7 +1894,7 @@
static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
+ struct intel_engine_cs *ring;
struct drm_file *file;
int i;
@@ -1877,12 +1921,9 @@
list_for_each_entry_reverse(file, &dev->filelist, lhead) {
struct drm_i915_file_private *file_priv = file->driver_priv;
- struct i915_hw_ppgtt *pvt_ppgtt;
- pvt_ppgtt = ctx_to_ppgtt(file_priv->private_default_ctx);
seq_printf(m, "proc: %s\n",
get_pid_task(file->pid, PIDTYPE_PID)->comm);
- seq_puts(m, " default context:\n");
idr_for_each(&file_priv->context_idr, per_file_ctx, m);
}
seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
@@ -1890,7 +1931,7 @@
static int i915_ppgtt_info(struct seq_file *m, void *data)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1912,7 +1953,7 @@
static int i915_llc(struct seq_file *m, void *data)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2018,7 +2059,7 @@
static int i915_pc8_status(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2093,7 +2134,7 @@
static int i915_power_domain_info(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_power_domains *power_domains = &dev_priv->power_domains;
@@ -2148,7 +2189,7 @@
struct intel_crtc *intel_crtc,
struct intel_encoder *intel_encoder)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_crtc *crtc = &intel_crtc->base;
struct intel_connector *intel_connector;
@@ -2156,12 +2197,12 @@
encoder = &intel_encoder->base;
seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
- encoder->base.id, drm_get_encoder_name(encoder));
+ encoder->base.id, encoder->name);
for_each_connector_on_encoder(dev, encoder, intel_connector) {
struct drm_connector *connector = &intel_connector->base;
seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
connector->base.id,
- drm_get_connector_name(connector),
+ connector->name,
drm_get_connector_status_name(connector->status));
if (connector->status == connector_status_connected) {
struct drm_display_mode *mode = &crtc->mode;
@@ -2175,7 +2216,7 @@
static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_crtc *crtc = &intel_crtc->base;
struct intel_encoder *intel_encoder;
@@ -2232,7 +2273,7 @@
struct drm_display_mode *mode;
seq_printf(m, "connector %d: type %s, status: %s\n",
- connector->base.id, drm_get_connector_name(connector),
+ connector->base.id, connector->name,
drm_get_connector_status_name(connector->status));
if (connector->status == connector_status_connected) {
seq_printf(m, "\tname: %s\n", connector->display_info.name);
@@ -2264,10 +2305,8 @@
if (IS_845G(dev) || IS_I865G(dev))
state = I915_READ(_CURACNTR) & CURSOR_ENABLE;
- else if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev))
- state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
else
- state = I915_READ(CURCNTR_IVB(pipe)) & CURSOR_MODE;
+ state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
return state;
}
@@ -2277,10 +2316,7 @@
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pos;
- if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_BROADWELL(dev))
- pos = I915_READ(CURPOS_IVB(pipe));
- else
- pos = I915_READ(CURPOS(pipe));
+ pos = I915_READ(CURPOS(pipe));
*x = (pos >> CURSOR_X_SHIFT) & CURSOR_POS_MASK;
if (pos & (CURSOR_POS_SIGN << CURSOR_X_SHIFT))
@@ -2295,7 +2331,7 @@
static int i915_display_info(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *crtc;
@@ -2305,7 +2341,7 @@
drm_modeset_lock_all(dev);
seq_printf(m, "CRTC info\n");
seq_printf(m, "---------\n");
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
+ for_each_intel_crtc(dev, crtc) {
bool active;
int x, y;
@@ -2573,7 +2609,7 @@
*source = INTEL_PIPE_CRC_SOURCE_PIPE;
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
list_for_each_entry(encoder, &dev->mode_config.encoder_list,
base.head) {
if (!encoder->base.crtc)
@@ -2609,7 +2645,7 @@
break;
}
}
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
}
@@ -3084,7 +3120,7 @@
static void wm_latency_show(struct seq_file *m, const uint16_t wm[5])
{
struct drm_device *dev = m->private;
- int num_levels = IS_HASWELL(dev) || IS_BROADWELL(dev) ? 5 : 4;
+ int num_levels = ilk_wm_max_level(dev) + 1;
int level;
drm_modeset_lock_all(dev);
@@ -3167,7 +3203,7 @@
struct seq_file *m = file->private_data;
struct drm_device *dev = m->private;
uint16_t new[5] = { 0 };
- int num_levels = IS_HASWELL(dev) || IS_BROADWELL(dev) ? 5 : 4;
+ int num_levels = ilk_wm_max_level(dev) + 1;
int level;
int ret;
char tmp[32];
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index d02c8de..4e70de6 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -44,6 +44,7 @@
#include <acpi/video.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
+#include <linux/oom.h>
#define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS])
@@ -63,7 +64,7 @@
* has access to the ring.
*/
#define RING_LOCK_TEST_WITH_RETURN(dev, file) do { \
- if (LP_RING(dev->dev_private)->obj == NULL) \
+ if (LP_RING(dev->dev_private)->buffer->obj == NULL) \
LOCK_TEST_WITH_RETURN(dev, file); \
} while (0)
@@ -119,7 +120,7 @@
static void i915_free_hws(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring = LP_RING(dev_priv);
+ struct intel_engine_cs *ring = LP_RING(dev_priv);
if (dev_priv->status_page_dmah) {
drm_pci_free(dev, dev_priv->status_page_dmah);
@@ -139,7 +140,8 @@
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv;
- struct intel_ring_buffer *ring = LP_RING(dev_priv);
+ struct intel_engine_cs *ring = LP_RING(dev_priv);
+ struct intel_ringbuffer *ringbuf = ring->buffer;
/*
* We should never lose context on the ring with modesetting
@@ -148,17 +150,17 @@
if (drm_core_check_feature(dev, DRIVER_MODESET))
return;
- ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
- ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
- ring->space = ring->head - (ring->tail + I915_RING_FREE_SPACE);
- if (ring->space < 0)
- ring->space += ring->size;
+ ringbuf->head = I915_READ_HEAD(ring) & HEAD_ADDR;
+ ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
+ ringbuf->space = ringbuf->head - (ringbuf->tail + I915_RING_FREE_SPACE);
+ if (ringbuf->space < 0)
+ ringbuf->space += ringbuf->size;
if (!dev->primary->master)
return;
master_priv = dev->primary->master->driver_priv;
- if (ring->head == ring->tail && master_priv->sarea_priv)
+ if (ringbuf->head == ringbuf->tail && master_priv->sarea_priv)
master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
}
@@ -201,7 +203,7 @@
}
if (init->ring_size != 0) {
- if (LP_RING(dev_priv)->obj != NULL) {
+ if (LP_RING(dev_priv)->buffer->obj != NULL) {
i915_dma_cleanup(dev);
DRM_ERROR("Client tried to initialize ringbuffer in "
"GEM mode\n");
@@ -234,11 +236,11 @@
static int i915_dma_resume(struct drm_device * dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring = LP_RING(dev_priv);
+ struct intel_engine_cs *ring = LP_RING(dev_priv);
DRM_DEBUG_DRIVER("%s\n", __func__);
- if (ring->virtual_start == NULL) {
+ if (ring->buffer->virtual_start == NULL) {
DRM_ERROR("can not ioremap virtual address for"
" ring buffer\n");
return -ENOMEM;
@@ -360,7 +362,7 @@
struct drm_i915_private *dev_priv = dev->dev_private;
int i, ret;
- if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8)
+ if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->buffer->size - 8)
return -EINVAL;
for (i = 0; i < dwords;) {
@@ -782,7 +784,7 @@
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
int ret = 0;
- struct intel_ring_buffer *ring = LP_RING(dev_priv);
+ struct intel_engine_cs *ring = LP_RING(dev_priv);
DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
READ_BREADCRUMB(dev_priv));
@@ -823,7 +825,7 @@
if (drm_core_check_feature(dev, DRIVER_MODESET))
return -ENODEV;
- if (!dev_priv || !LP_RING(dev_priv)->virtual_start) {
+ if (!dev_priv || !LP_RING(dev_priv)->buffer->virtual_start) {
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
@@ -1073,7 +1075,7 @@
{
struct drm_i915_private *dev_priv = dev->dev_private;
drm_i915_hws_addr_t *hws = data;
- struct intel_ring_buffer *ring;
+ struct intel_engine_cs *ring;
if (drm_core_check_feature(dev, DRIVER_MODESET))
return -ENODEV;
@@ -1570,7 +1572,6 @@
spin_lock_init(&dev_priv->backlight_lock);
spin_lock_init(&dev_priv->uncore.lock);
spin_lock_init(&dev_priv->mm.object_stat_lock);
- dev_priv->ring_index = 0;
mutex_init(&dev_priv->dpio_lock);
mutex_init(&dev_priv->modeset_restore_lock);
@@ -1741,8 +1742,8 @@
intel_power_domains_remove(dev_priv);
drm_vblank_cleanup(dev);
out_gem_unload:
- if (dev_priv->mm.inactive_shrinker.scan_objects)
- unregister_shrinker(&dev_priv->mm.inactive_shrinker);
+ WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
+ unregister_shrinker(&dev_priv->mm.shrinker);
if (dev->pdev->msi_enabled)
pci_disable_msi(dev->pdev);
@@ -1793,8 +1794,8 @@
i915_teardown_sysfs(dev);
- if (dev_priv->mm.inactive_shrinker.scan_objects)
- unregister_shrinker(&dev_priv->mm.inactive_shrinker);
+ WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
+ unregister_shrinker(&dev_priv->mm.shrinker);
io_mapping_free(dev_priv->gtt.mappable);
arch_phys_wc_del(dev_priv->gtt.mtrr);
@@ -1867,7 +1868,7 @@
kmem_cache_destroy(dev_priv->slab);
pci_dev_put(dev_priv->bridge_dev);
- kfree(dev->dev_private);
+ kfree(dev_priv);
return 0;
}
@@ -1983,6 +1984,7 @@
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
};
int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 208e185..5b5b82c 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -36,6 +36,7 @@
#include <linux/console.h>
#include <linux/module.h>
+#include <linux/pm_runtime.h>
#include <drm/drm_crtc_helper.h>
static struct drm_driver driver;
@@ -49,12 +50,30 @@
.dpll_md_offsets = { DPLL_A_MD_OFFSET, DPLL_B_MD_OFFSET }, \
.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET }
+#define GEN_CHV_PIPEOFFSETS \
+ .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
+ CHV_PIPE_C_OFFSET }, \
+ .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
+ CHV_TRANSCODER_C_OFFSET, }, \
+ .dpll_offsets = { DPLL_A_OFFSET, DPLL_B_OFFSET, \
+ CHV_DPLL_C_OFFSET }, \
+ .dpll_md_offsets = { DPLL_A_MD_OFFSET, DPLL_B_MD_OFFSET, \
+ CHV_DPLL_C_MD_OFFSET }, \
+ .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET, \
+ CHV_PALETTE_C_OFFSET }
+
+#define CURSOR_OFFSETS \
+ .cursor_offsets = { CURSOR_A_OFFSET, CURSOR_B_OFFSET, CHV_CURSOR_C_OFFSET }
+
+#define IVB_CURSOR_OFFSETS \
+ .cursor_offsets = { CURSOR_A_OFFSET, IVB_CURSOR_B_OFFSET, IVB_CURSOR_C_OFFSET }
static const struct intel_device_info intel_i830_info = {
.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
.has_overlay = 1, .overlay_needs_physical = 1,
.ring_mask = RENDER_RING,
GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
};
static const struct intel_device_info intel_845g_info = {
@@ -62,6 +81,7 @@
.has_overlay = 1, .overlay_needs_physical = 1,
.ring_mask = RENDER_RING,
GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
};
static const struct intel_device_info intel_i85x_info = {
@@ -71,6 +91,7 @@
.has_fbc = 1,
.ring_mask = RENDER_RING,
GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
};
static const struct intel_device_info intel_i865g_info = {
@@ -78,6 +99,7 @@
.has_overlay = 1, .overlay_needs_physical = 1,
.ring_mask = RENDER_RING,
GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
};
static const struct intel_device_info intel_i915g_info = {
@@ -85,6 +107,7 @@
.has_overlay = 1, .overlay_needs_physical = 1,
.ring_mask = RENDER_RING,
GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
};
static const struct intel_device_info intel_i915gm_info = {
.gen = 3, .is_mobile = 1, .num_pipes = 2,
@@ -94,12 +117,14 @@
.has_fbc = 1,
.ring_mask = RENDER_RING,
GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
};
static const struct intel_device_info intel_i945g_info = {
.gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
.has_overlay = 1, .overlay_needs_physical = 1,
.ring_mask = RENDER_RING,
GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
};
static const struct intel_device_info intel_i945gm_info = {
.gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
@@ -109,6 +134,7 @@
.has_fbc = 1,
.ring_mask = RENDER_RING,
GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
};
static const struct intel_device_info intel_i965g_info = {
@@ -117,6 +143,7 @@
.has_overlay = 1,
.ring_mask = RENDER_RING,
GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
};
static const struct intel_device_info intel_i965gm_info = {
@@ -126,6 +153,7 @@
.supports_tv = 1,
.ring_mask = RENDER_RING,
GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
};
static const struct intel_device_info intel_g33_info = {
@@ -134,6 +162,7 @@
.has_overlay = 1,
.ring_mask = RENDER_RING,
GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
};
static const struct intel_device_info intel_g45_info = {
@@ -141,6 +170,7 @@
.has_pipe_cxsr = 1, .has_hotplug = 1,
.ring_mask = RENDER_RING | BSD_RING,
GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
};
static const struct intel_device_info intel_gm45_info = {
@@ -150,6 +180,7 @@
.supports_tv = 1,
.ring_mask = RENDER_RING | BSD_RING,
GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
};
static const struct intel_device_info intel_pineview_info = {
@@ -157,6 +188,7 @@
.need_gfx_hws = 1, .has_hotplug = 1,
.has_overlay = 1,
GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
};
static const struct intel_device_info intel_ironlake_d_info = {
@@ -164,6 +196,7 @@
.need_gfx_hws = 1, .has_hotplug = 1,
.ring_mask = RENDER_RING | BSD_RING,
GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
};
static const struct intel_device_info intel_ironlake_m_info = {
@@ -172,6 +205,7 @@
.has_fbc = 1,
.ring_mask = RENDER_RING | BSD_RING,
GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
};
static const struct intel_device_info intel_sandybridge_d_info = {
@@ -181,6 +215,7 @@
.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
.has_llc = 1,
GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
};
static const struct intel_device_info intel_sandybridge_m_info = {
@@ -190,6 +225,7 @@
.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
.has_llc = 1,
GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
};
#define GEN7_FEATURES \
@@ -203,6 +239,7 @@
GEN7_FEATURES,
.is_ivybridge = 1,
GEN_DEFAULT_PIPEOFFSETS,
+ IVB_CURSOR_OFFSETS,
};
static const struct intel_device_info intel_ivybridge_m_info = {
@@ -210,6 +247,7 @@
.is_ivybridge = 1,
.is_mobile = 1,
GEN_DEFAULT_PIPEOFFSETS,
+ IVB_CURSOR_OFFSETS,
};
static const struct intel_device_info intel_ivybridge_q_info = {
@@ -217,6 +255,7 @@
.is_ivybridge = 1,
.num_pipes = 0, /* legal, last one wins */
GEN_DEFAULT_PIPEOFFSETS,
+ IVB_CURSOR_OFFSETS,
};
static const struct intel_device_info intel_valleyview_m_info = {
@@ -228,6 +267,7 @@
.has_fbc = 0, /* legal, last one wins */
.has_llc = 0, /* legal, last one wins */
GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
};
static const struct intel_device_info intel_valleyview_d_info = {
@@ -238,6 +278,7 @@
.has_fbc = 0, /* legal, last one wins */
.has_llc = 0, /* legal, last one wins */
GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
};
static const struct intel_device_info intel_haswell_d_info = {
@@ -247,6 +288,7 @@
.has_fpga_dbg = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
GEN_DEFAULT_PIPEOFFSETS,
+ IVB_CURSOR_OFFSETS,
};
static const struct intel_device_info intel_haswell_m_info = {
@@ -257,6 +299,7 @@
.has_fpga_dbg = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
GEN_DEFAULT_PIPEOFFSETS,
+ IVB_CURSOR_OFFSETS,
};
static const struct intel_device_info intel_broadwell_d_info = {
@@ -267,6 +310,7 @@
.has_ddi = 1,
.has_fbc = 1,
GEN_DEFAULT_PIPEOFFSETS,
+ IVB_CURSOR_OFFSETS,
};
static const struct intel_device_info intel_broadwell_m_info = {
@@ -297,6 +341,18 @@
.has_ddi = 1,
.has_fbc = 1,
GEN_DEFAULT_PIPEOFFSETS,
+ IVB_CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_cherryview_info = {
+ .is_preliminary = 1,
+ .gen = 8, .num_pipes = 3,
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
+ .is_valleyview = 1,
+ .display_mmio_offset = VLV_DISPLAY_BASE,
+ GEN_CHV_PIPEOFFSETS,
+ CURSOR_OFFSETS,
};
/*
@@ -334,7 +390,8 @@
INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info), \
INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info), \
INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info), \
- INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info)
+ INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info), \
+ INTEL_CHV_IDS(&intel_cherryview_info)
static const struct pci_device_id pciidlist[] = { /* aka */
INTEL_PCI_IDS,
@@ -467,18 +524,20 @@
return error;
}
- cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
-
drm_irq_uninstall(dev);
dev_priv->enable_hotplug_processing = false;
+
+ intel_disable_gt_powersave(dev);
+
/*
* Disable CRTCs directly since we want to preserve sw state
* for _thaw.
*/
- mutex_lock(&dev->mode_config.mutex);
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+ drm_modeset_lock_all(dev);
+ for_each_crtc(dev, crtc) {
dev_priv->display.crtc_disable(crtc);
- mutex_unlock(&dev->mode_config.mutex);
+ }
+ drm_modeset_unlock_all(dev);
intel_modeset_suspend_hw(dev);
}
@@ -541,24 +600,6 @@
console_unlock();
}
-static void intel_resume_hotplug(struct drm_device *dev)
-{
- struct drm_mode_config *mode_config = &dev->mode_config;
- struct intel_encoder *encoder;
-
- mutex_lock(&mode_config->mutex);
- DRM_DEBUG_KMS("running encoder hotplug functions\n");
-
- list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
- if (encoder->hot_plug)
- encoder->hot_plug(encoder);
-
- mutex_unlock(&mode_config->mutex);
-
- /* Just fire off a uevent and let userspace tell us what to do */
- drm_helper_hpd_irq_event(dev);
-}
-
static int i915_drm_thaw_early(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -614,7 +655,7 @@
intel_hpd_init(dev);
dev_priv->enable_hotplug_processing = true;
/* Config may have changed between suspend and resume */
- intel_resume_hotplug(dev);
+ drm_helper_hpd_irq_event(dev);
}
intel_opregion_init(dev);
@@ -916,21 +957,219 @@
return i915_drm_freeze(drm_dev);
}
-static void hsw_runtime_suspend(struct drm_i915_private *dev_priv)
+static int hsw_runtime_suspend(struct drm_i915_private *dev_priv)
{
hsw_enable_pc8(dev_priv);
+
+ return 0;
}
-static void snb_runtime_resume(struct drm_i915_private *dev_priv)
+static int snb_runtime_resume(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
intel_init_pch_refclk(dev);
+
+ return 0;
}
-static void hsw_runtime_resume(struct drm_i915_private *dev_priv)
+static int hsw_runtime_resume(struct drm_i915_private *dev_priv)
{
hsw_disable_pc8(dev_priv);
+
+ return 0;
+}
+
+/*
+ * Save all Gunit registers that may be lost after a D3 and a subsequent
+ * S0i[R123] transition. The list of registers needing a save/restore is
+ * defined in the VLV2_S0IXRegs document. This document marks all Gunit
+ * registers in the following way:
+ * - Driver: saved/restored by the driver
+ * - Punit : saved/restored by the Punit firmware
+ * - No, w/o marking: no need to save/restore, since the register is R/O or
+ * used internally by the HW in a way that doesn't depend on
+ * keeping the content across a suspend/resume.
+ * - Debug : used for debugging
+ *
+ * We save/restore all registers marked with 'Driver', with the following
+ * exceptions:
+ * - Registers out of use, including also registers marked with 'Debug'.
+ * These have no effect on the driver's operation, so we don't save/restore
+ * them to reduce the overhead.
+ * - Registers that are fully setup by an initialization function called from
+ * the resume path. For example many clock gating and RPS/RC6 registers.
+ * - Registers that provide the right functionality with their reset defaults.
+ *
+ * TODO: Except for registers that, based on the above 3 criteria, can be safely
+ * ignored, we save/restore all others, practically treating the HW context as
+ * a black-box for the driver. Further investigation is needed to reduce the
+ * saved/restored registers even further, by following the same 3 criteria.
+ */
+static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
+{
+ struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
+ int i;
+
+ /* GAM 0x4000-0x4770 */
+ s->wr_watermark = I915_READ(GEN7_WR_WATERMARK);
+ s->gfx_prio_ctrl = I915_READ(GEN7_GFX_PRIO_CTRL);
+ s->arb_mode = I915_READ(ARB_MODE);
+ s->gfx_pend_tlb0 = I915_READ(GEN7_GFX_PEND_TLB0);
+ s->gfx_pend_tlb1 = I915_READ(GEN7_GFX_PEND_TLB1);
+
+ for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
+ s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS_BASE + i * 4);
+
+ s->media_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
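+ /* FIXME: the next line re-reads GEN7_MEDIA_MAX_REQ_COUNT (and the restore
+  * path below writes that register twice); the GFX counterpart of the
+  * register is presumably what was intended. */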
+ s->gfx_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
+
+ s->render_hwsp = I915_READ(RENDER_HWS_PGA_GEN7);
+ s->ecochk = I915_READ(GAM_ECOCHK);
+ s->bsd_hwsp = I915_READ(BSD_HWS_PGA_GEN7);
+ s->blt_hwsp = I915_READ(BLT_HWS_PGA_GEN7);
+
+ s->tlb_rd_addr = I915_READ(GEN7_TLB_RD_ADDR);
+
+ /* MBC 0x9024-0x91D0, 0x8500 */
+ s->g3dctl = I915_READ(VLV_G3DCTL);
+ s->gsckgctl = I915_READ(VLV_GSCKGCTL);
+ s->mbctl = I915_READ(GEN6_MBCTL);
+
+ /* GCP 0x9400-0x9424, 0x8100-0x810C */
+ s->ucgctl1 = I915_READ(GEN6_UCGCTL1);
+ s->ucgctl3 = I915_READ(GEN6_UCGCTL3);
+ s->rcgctl1 = I915_READ(GEN6_RCGCTL1);
+ s->rcgctl2 = I915_READ(GEN6_RCGCTL2);
+ s->rstctl = I915_READ(GEN6_RSTCTL);
+ s->misccpctl = I915_READ(GEN7_MISCCPCTL);
+
+ /* GPM 0xA000-0xAA84, 0x8000-0x80FC */
+ s->gfxpause = I915_READ(GEN6_GFXPAUSE);
+ s->rpdeuhwtc = I915_READ(GEN6_RPDEUHWTC);
+ s->rpdeuc = I915_READ(GEN6_RPDEUC);
+ s->ecobus = I915_READ(ECOBUS);
+ s->pwrdwnupctl = I915_READ(VLV_PWRDWNUPCTL);
+ s->rp_down_timeout = I915_READ(GEN6_RP_DOWN_TIMEOUT);
+ s->rp_deucsw = I915_READ(GEN6_RPDEUCSW);
+ s->rcubmabdtmr = I915_READ(GEN6_RCUBMABDTMR);
+ s->rcedata = I915_READ(VLV_RCEDATA);
+ s->spare2gh = I915_READ(VLV_SPAREG2H);
+
+ /* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
+ s->gt_imr = I915_READ(GTIMR);
+ s->gt_ier = I915_READ(GTIER);
+ s->pm_imr = I915_READ(GEN6_PMIMR);
+ s->pm_ier = I915_READ(GEN6_PMIER);
+
+ for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
+ s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH_BASE + i * 4);
+
+ /* GT SA CZ domain, 0x100000-0x138124 */
+ s->tilectl = I915_READ(TILECTL);
+ s->gt_fifoctl = I915_READ(GTFIFOCTL);
+ s->gtlc_wake_ctrl = I915_READ(VLV_GTLC_WAKE_CTRL);
+ s->gtlc_survive = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
+ s->pmwgicz = I915_READ(VLV_PMWGICZ);
+
+ /* Gunit-Display CZ domain, 0x182028-0x1821CF */
+ s->gu_ctl0 = I915_READ(VLV_GU_CTL0);
+ s->gu_ctl1 = I915_READ(VLV_GU_CTL1);
+ s->clock_gate_dis2 = I915_READ(VLV_GUNIT_CLOCK_GATE2);
+
+ /*
+ * Not saving any of:
+ * DFT, 0x9800-0x9EC0
+ * SARB, 0xB000-0xB1FC
+ * GAC, 0x5208-0x524C, 0x14000-0x14C000
+ * PCI CFG
+ */
+}
+
+static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
+{
+ struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
+ u32 val;
+ int i;
+
+ /* GAM 0x4000-0x4770 */
+ I915_WRITE(GEN7_WR_WATERMARK, s->wr_watermark);
+ I915_WRITE(GEN7_GFX_PRIO_CTRL, s->gfx_prio_ctrl);
+ I915_WRITE(ARB_MODE, s->arb_mode | (0xffff << 16));
+ I915_WRITE(GEN7_GFX_PEND_TLB0, s->gfx_pend_tlb0);
+ I915_WRITE(GEN7_GFX_PEND_TLB1, s->gfx_pend_tlb1);
+
+ for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
+ I915_WRITE(GEN7_LRA_LIMITS_BASE + i * 4, s->lra_limits[i]);
+
+ I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
+ I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->gfx_max_req_count);
+
+ I915_WRITE(RENDER_HWS_PGA_GEN7, s->render_hwsp);
+ I915_WRITE(GAM_ECOCHK, s->ecochk);
+ I915_WRITE(BSD_HWS_PGA_GEN7, s->bsd_hwsp);
+ I915_WRITE(BLT_HWS_PGA_GEN7, s->blt_hwsp);
+
+ I915_WRITE(GEN7_TLB_RD_ADDR, s->tlb_rd_addr);
+
+ /* MBC 0x9024-0x91D0, 0x8500 */
+ I915_WRITE(VLV_G3DCTL, s->g3dctl);
+ I915_WRITE(VLV_GSCKGCTL, s->gsckgctl);
+ I915_WRITE(GEN6_MBCTL, s->mbctl);
+
+ /* GCP 0x9400-0x9424, 0x8100-0x810C */
+ I915_WRITE(GEN6_UCGCTL1, s->ucgctl1);
+ I915_WRITE(GEN6_UCGCTL3, s->ucgctl3);
+ I915_WRITE(GEN6_RCGCTL1, s->rcgctl1);
+ I915_WRITE(GEN6_RCGCTL2, s->rcgctl2);
+ I915_WRITE(GEN6_RSTCTL, s->rstctl);
+ I915_WRITE(GEN7_MISCCPCTL, s->misccpctl);
+
+ /* GPM 0xA000-0xAA84, 0x8000-0x80FC */
+ I915_WRITE(GEN6_GFXPAUSE, s->gfxpause);
+ I915_WRITE(GEN6_RPDEUHWTC, s->rpdeuhwtc);
+ I915_WRITE(GEN6_RPDEUC, s->rpdeuc);
+ I915_WRITE(ECOBUS, s->ecobus);
+ I915_WRITE(VLV_PWRDWNUPCTL, s->pwrdwnupctl);
+ I915_WRITE(GEN6_RP_DOWN_TIMEOUT, s->rp_down_timeout);
+ I915_WRITE(GEN6_RPDEUCSW, s->rp_deucsw);
+ I915_WRITE(GEN6_RCUBMABDTMR, s->rcubmabdtmr);
+ I915_WRITE(VLV_RCEDATA, s->rcedata);
+ I915_WRITE(VLV_SPAREG2H, s->spare2gh);
+
+ /* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
+ I915_WRITE(GTIMR, s->gt_imr);
+ I915_WRITE(GTIER, s->gt_ier);
+ I915_WRITE(GEN6_PMIMR, s->pm_imr);
+ I915_WRITE(GEN6_PMIER, s->pm_ier);
+
+ for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
+ I915_WRITE(GEN7_GT_SCRATCH_BASE + i * 4, s->gt_scratch[i]);
+
+ /* GT SA CZ domain, 0x100000-0x138124 */
+ I915_WRITE(TILECTL, s->tilectl);
+ I915_WRITE(GTFIFOCTL, s->gt_fifoctl);
+ /*
+ * Preserve the GT allow wake and GFX force clock bit; they are not
+ * to be restored here, as they are used by the caller to control the
+ * s0ix suspend/resume sequence.
+ */
+ val = I915_READ(VLV_GTLC_WAKE_CTRL);
+ val &= VLV_GTLC_ALLOWWAKEREQ;
+ val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ;
+ I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
+
+ val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
+ val &= VLV_GFX_CLK_FORCE_ON_BIT;
+ val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT;
+ I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);
+
+ I915_WRITE(VLV_PMWGICZ, s->pmwgicz);
+
+ /* Gunit-Display CZ domain, 0x182028-0x1821CF */
+ I915_WRITE(VLV_GU_CTL0, s->gu_ctl0);
+ I915_WRITE(VLV_GU_CTL1, s->gu_ctl1);
+ I915_WRITE(VLV_GUNIT_CLOCK_GATE2, s->clock_gate_dis2);
}
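The wake-control and survivability restores just above use a masked-restore
idiom: bits owned by the caller are preserved from the live register value,
and everything else comes from the saved snapshot. Distilled into a
self-contained sketch (KEEP_MASK is a stand-in for a bit like
VLV_GTLC_ALLOWWAKEREQ):

    #include <stdint.h>
    #include <stdio.h>

    #define KEEP_MASK 0x00000004u /* hypothetical caller-owned bit */

    static uint32_t masked_restore(uint32_t live, uint32_t saved)
    {
        uint32_t val = live & KEEP_MASK;  /* keep the caller-owned bits */
        val |= saved & ~KEEP_MASK;        /* restore everything else */
        return val;
    }

    int main(void)
    {
        printf("0x%08x\n", masked_restore(0x00000004u, 0xdeadbee0u));
        return 0;
    }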
int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
@@ -970,11 +1209,143 @@
#undef COND
}
+static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow)
+{
+ u32 val;
+ int err = 0;
+
+ val = I915_READ(VLV_GTLC_WAKE_CTRL);
+ val &= ~VLV_GTLC_ALLOWWAKEREQ;
+ if (allow)
+ val |= VLV_GTLC_ALLOWWAKEREQ;
+ I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
+ POSTING_READ(VLV_GTLC_WAKE_CTRL);
+
+#define COND (!!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEACK) == \
+ allow)
+ err = wait_for(COND, 1);
+ if (err)
+ DRM_ERROR("timeout disabling GT waking\n");
+ return err;
+#undef COND
+}
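vlv_allow_gt_wake() and vlv_wait_for_gt_wells() both lean on the same
COND/wait_for() idiom: express the success condition as a macro over a
register read, then poll it until it holds or a timeout expires. A rough
user-space equivalent (read_status() is hypothetical, and the kernel's
wait_for() returns -ETIMEDOUT rather than -1):

    #include <stdio.h>
    #include <time.h>

    static int read_status(void)
    {
        static int polls;
        return ++polls > 3; /* becomes true on the fourth poll */
    }

    static int wait_for(int (*cond)(void), int timeout_ms)
    {
        struct timespec delay = { 0, 1000 * 1000 }; /* poll every 1 ms */

        for (int elapsed = 0; elapsed < timeout_ms; elapsed++) {
            if (cond())
                return 0;   /* condition met */
            nanosleep(&delay, NULL);
        }
        return -1;          /* timed out */
    }

    int main(void)
    {
        printf("wait_for -> %d\n", wait_for(read_status, 10));
        return 0;
    }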
+
+static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
+ bool wait_for_on)
+{
+ u32 mask;
+ u32 val;
+ int err;
+
+ mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK;
+ val = wait_for_on ? mask : 0;
+#define COND ((I915_READ(VLV_GTLC_PW_STATUS) & mask) == val)
+ if (COND)
+ return 0;
+
+ DRM_DEBUG_KMS("waiting for GT wells to go %s (%08x)\n",
+ wait_for_on ? "on" : "off",
+ I915_READ(VLV_GTLC_PW_STATUS));
+
+ /*
+ * RC6 transitioning can be delayed up to 2 msec (see
+ * valleyview_enable_rps), use 3 msec for safety.
+ */
+ err = wait_for(COND, 3);
+ if (err)
+ DRM_ERROR("timeout waiting for GT wells to go %s\n",
+ wait_for_on ? "on" : "off");
+
+ return err;
+#undef COND
+}
+
+static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
+{
+ if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR))
+ return;
+
+ DRM_ERROR("GT register access while GT waking disabled\n");
+ I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
+}
+
+static int vlv_runtime_suspend(struct drm_i915_private *dev_priv)
+{
+ u32 mask;
+ int err;
+
+ /*
+ * Bspec defines the following GT well-on flags as debug only, so
+ * don't treat them as hard failures.
+ */
+ (void)vlv_wait_for_gt_wells(dev_priv, false);
+
+ mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS;
+ WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask);
+
+ vlv_check_no_gt_access(dev_priv);
+
+ err = vlv_force_gfx_clock(dev_priv, true);
+ if (err)
+ goto err1;
+
+ err = vlv_allow_gt_wake(dev_priv, false);
+ if (err)
+ goto err2;
+ vlv_save_gunit_s0ix_state(dev_priv);
+
+ err = vlv_force_gfx_clock(dev_priv, false);
+ if (err)
+ goto err2;
+
+ return 0;
+
+err2:
+ /* For safety always re-enable waking and disable gfx clock forcing */
+ vlv_allow_gt_wake(dev_priv, true);
+err1:
+ vlv_force_gfx_clock(dev_priv, false);
+
+ return err;
+}
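vlv_runtime_suspend() follows the kernel's standard goto-unwind shape: each
failure jumps to a label that rolls back exactly the steps that already
succeeded, in reverse order. The same shape distilled into a runnable sketch
with hypothetical step_a()/step_b():

    #include <stdio.h>

    static int step_a(int fail) { return fail == 1 ? -1 : 0; }
    static int step_b(int fail) { return fail == 2 ? -1 : 0; }
    static void undo_a(void)    { puts("undo step a"); }

    static int do_suspend(int fail)
    {
        int err;

        err = step_a(fail);
        if (err)
            goto err1;

        err = step_b(fail);
        if (err)
            goto err2;

        return 0;

    err2:
        undo_a(); /* roll back the step that succeeded */
    err1:
        return err;
    }

    int main(void)
    {
        printf("do_suspend(2) -> %d\n", do_suspend(2));
        return 0;
    }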
+
+static int vlv_runtime_resume(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ int err;
+ int ret;
+
+ /*
+ * If any of the steps fail, just try to continue; that's the best we
+ * can do at this point. Return the first error code (which will also
+ * leave RPM permanently disabled).
+ */
+ ret = vlv_force_gfx_clock(dev_priv, true);
+
+ vlv_restore_gunit_s0ix_state(dev_priv);
+
+ err = vlv_allow_gt_wake(dev_priv, true);
+ if (!ret)
+ ret = err;
+
+ err = vlv_force_gfx_clock(dev_priv, false);
+ if (!ret)
+ ret = err;
+
+ vlv_check_no_gt_access(dev_priv);
+
+ intel_init_clock_gating(dev);
+ i915_gem_restore_fences(dev);
+
+ return ret;
+}
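Note how the resume path deliberately runs every step and reports only the
first failure instead of aborting early: a half-resumed device is still more
useful than an abandoned one. The aggregation in miniature:

    #include <stdio.h>

    static int run_all(int a, int b, int c)
    {
        int ret = a;

        if (!ret)
            ret = b; /* only the first failure is kept */
        if (!ret)
            ret = c;

        return ret; /* 0 only if every step succeeded */
    }

    int main(void)
    {
        printf("%d\n", run_all(0, -5, -7)); /* prints -5 */
        return 0;
    }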
+
static int intel_runtime_suspend(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
struct drm_device *dev = pci_get_drvdata(pdev);
struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6(dev))))
return -ENODEV;
@@ -985,6 +1356,30 @@
DRM_DEBUG_KMS("Suspending device\n");
/*
+ * We could deadlock here in case another thread holding struct_mutex
+ * calls RPM suspend concurrently, since the RPM suspend will wait
+ * first for this RPM suspend to finish. In this case the concurrent
+ * RPM resume will be followed by its RPM suspend counterpart. Still
+ * for consistency return -EAGAIN, which will reschedule this suspend.
+ */
+ if (!mutex_trylock(&dev->struct_mutex)) {
+ DRM_DEBUG_KMS("device lock contention, deffering suspend\n");
+ /*
+ * Bump the expiration timestamp, otherwise the suspend won't
+ * be rescheduled.
+ */
+ pm_runtime_mark_last_busy(device);
+
+ return -EAGAIN;
+ }
+ /*
+ * We are safe here against re-faults, since the fault handler takes
+ * an RPM reference.
+ */
+ i915_gem_release_all_mmaps(dev_priv);
+ mutex_unlock(&dev->struct_mutex);
+
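The trylock above turns a potential deadlock into a deferral: if struct_mutex
is contended, the suspend bails out with -EAGAIN and runtime PM retries it
later. A pthread-based sketch of that shape (not the driver's API):

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t struct_mutex = PTHREAD_MUTEX_INITIALIZER;

    static int runtime_suspend(void)
    {
        if (pthread_mutex_trylock(&struct_mutex) != 0)
            return -EAGAIN; /* contended: defer rather than risk deadlock */

        /* ... release mmaps and other lock-protected work ... */
        pthread_mutex_unlock(&struct_mutex);
        return 0;
    }

    int main(void)
    {
        printf("suspend -> %d\n", runtime_suspend());
        return 0;
    }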
+ /*
* rps.work can't be rearmed here, since we get here only after making
* sure the GPU is idle and the RPS freq is set to the minimum. See
* intel_mark_idle().
@@ -992,14 +1387,23 @@
cancel_work_sync(&dev_priv->rps.work);
intel_runtime_pm_disable_interrupts(dev);
- if (IS_GEN6(dev))
- ;
- else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
- hsw_runtime_suspend(dev_priv);
- else
+ if (IS_GEN6(dev)) {
+ ret = 0;
+ } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+ ret = hsw_runtime_suspend(dev_priv);
+ } else if (IS_VALLEYVIEW(dev)) {
+ ret = vlv_runtime_suspend(dev_priv);
+ } else {
+ ret = -ENODEV;
WARN_ON(1);
+ }
- i915_gem_release_all_mmaps(dev_priv);
+ if (ret) {
+ DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
+ intel_runtime_pm_restore_interrupts(dev);
+
+ return ret;
+ }
del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
dev_priv->pm.suspended = true;
@@ -1022,6 +1426,7 @@
struct pci_dev *pdev = to_pci_dev(device);
struct drm_device *dev = pci_get_drvdata(pdev);
struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
WARN_ON(!HAS_RUNTIME_PM(dev));
@@ -1030,21 +1435,33 @@
intel_opregion_notify_adapter(dev, PCI_D0);
dev_priv->pm.suspended = false;
- if (IS_GEN6(dev))
- snb_runtime_resume(dev_priv);
- else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
- hsw_runtime_resume(dev_priv);
- else
+ if (IS_GEN6(dev)) {
+ ret = snb_runtime_resume(dev_priv);
+ } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+ ret = hsw_runtime_resume(dev_priv);
+ } else if (IS_VALLEYVIEW(dev)) {
+ ret = vlv_runtime_resume(dev_priv);
+ } else {
WARN_ON(1);
+ ret = -ENODEV;
+ }
+ /*
+ * No point in rolling things back in case of an error, as the best
+ * we can do is to hope that things will still work (and disable RPM).
+ */
i915_gem_init_swizzling(dev);
gen6_update_ring_freq(dev);
intel_runtime_pm_restore_interrupts(dev);
intel_reset_gt_powersave(dev);
- DRM_DEBUG_KMS("Device resumed\n");
- return 0;
+ if (ret)
+ DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret);
+ else
+ DRM_DEBUG_KMS("Device resumed\n");
+
+ return ret;
}
static const struct dev_pm_ops i915_pm_ops = {
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 3fc2e3d..8f68678 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -41,6 +41,7 @@
#include <linux/i2c-algo-bit.h>
#include <drm/intel-gtt.h>
#include <linux/backlight.h>
+#include <linux/hashtable.h>
#include <linux/intel-iommu.h>
#include <linux/kref.h>
#include <linux/pm_qos.h>
@@ -92,7 +93,7 @@
};
#define port_name(p) ((p) + 'A')
-#define I915_NUM_PHYS_VLV 1
+#define I915_NUM_PHYS_VLV 2
enum dpio_channel {
DPIO_CH0,
@@ -163,6 +164,12 @@
#define for_each_pipe(p) for ((p) = 0; (p) < INTEL_INFO(dev)->num_pipes; (p)++)
#define for_each_sprite(p, s) for ((s) = 0; (s) < INTEL_INFO(dev)->num_sprites[(p)]; (s)++)
+#define for_each_crtc(dev, crtc) \
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+
+#define for_each_intel_crtc(dev, intel_crtc) \
+ list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head)
+
#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
if ((intel_encoder)->base.crtc == (__crtc))
@@ -172,6 +179,7 @@
if ((intel_connector)->base.encoder == (__encoder))
struct drm_i915_private;
+struct i915_mmu_object;
enum intel_dpll_id {
DPLL_ID_PRIVATE = -1, /* non-shared dpll in use */
@@ -397,6 +405,7 @@
u32 tiling:2;
u32 dirty:1;
u32 purgeable:1;
+ u32 userptr:1;
s32 ring:4;
u32 cache_level:3;
} **active_bo, **pinned_bo;
@@ -461,10 +470,11 @@
int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj,
+ struct intel_engine_cs *ring,
uint32_t flags);
- int (*update_primary_plane)(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- int x, int y);
+ void (*update_primary_plane)(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int x, int y);
void (*hpd_irq_setup)(struct drm_device *dev);
/* clock updates for mode set */
/* cursor updates */
@@ -557,6 +567,7 @@
int dpll_offsets[I915_MAX_PIPES];
int dpll_md_offsets[I915_MAX_PIPES];
int palette_offsets[I915_MAX_PIPES];
+ int cursor_offsets[I915_MAX_PIPES];
};
#undef DEFINE_FLAG
@@ -588,13 +599,13 @@
/* This must match up with the value previously used for execbuf2.rsvd1. */
#define DEFAULT_CONTEXT_ID 0
-struct i915_hw_context {
+struct intel_context {
struct kref ref;
int id;
bool is_initialized;
uint8_t remap_slice;
struct drm_i915_file_private *file_priv;
- struct intel_ring_buffer *last_ring;
+ struct intel_engine_cs *last_ring;
struct drm_i915_gem_object *obj;
struct i915_ctx_hang_stats hang_stats;
struct i915_address_space *vm;
@@ -819,6 +830,67 @@
u32 savePCH_PORT_HOTPLUG;
};
+struct vlv_s0ix_state {
+ /* GAM */
+ u32 wr_watermark;
+ u32 gfx_prio_ctrl;
+ u32 arb_mode;
+ u32 gfx_pend_tlb0;
+ u32 gfx_pend_tlb1;
+ u32 lra_limits[GEN7_LRA_LIMITS_REG_NUM];
+ u32 media_max_req_count;
+ u32 gfx_max_req_count;
+ u32 render_hwsp;
+ u32 ecochk;
+ u32 bsd_hwsp;
+ u32 blt_hwsp;
+ u32 tlb_rd_addr;
+
+ /* MBC */
+ u32 g3dctl;
+ u32 gsckgctl;
+ u32 mbctl;
+
+ /* GCP */
+ u32 ucgctl1;
+ u32 ucgctl3;
+ u32 rcgctl1;
+ u32 rcgctl2;
+ u32 rstctl;
+ u32 misccpctl;
+
+ /* GPM */
+ u32 gfxpause;
+ u32 rpdeuhwtc;
+ u32 rpdeuc;
+ u32 ecobus;
+ u32 pwrdwnupctl;
+ u32 rp_down_timeout;
+ u32 rp_deucsw;
+ u32 rcubmabdtmr;
+ u32 rcedata;
+ u32 spare2gh;
+
+ /* Display 1 CZ domain */
+ u32 gt_imr;
+ u32 gt_ier;
+ u32 pm_imr;
+ u32 pm_ier;
+ u32 gt_scratch[GEN7_GT_SCRATCH_REG_NUM];
+
+ /* GT SA CZ domain */
+ u32 tilectl;
+ u32 gt_fifoctl;
+ u32 gtlc_wake_ctrl;
+ u32 gtlc_survive;
+ u32 pmwgicz;
+
+ /* Display 2 CZ domain */
+ u32 gu_ctl0;
+ u32 gu_ctl1;
+ u32 clock_gate_dis2;
+};
+
struct intel_gen6_power_mgmt {
/* work and pm_iir are protected by dev_priv->irq_lock */
struct work_struct work;
@@ -987,7 +1059,8 @@
/** PPGTT used for aliasing the PPGTT with the GTT */
struct i915_hw_ppgtt *aliasing_ppgtt;
- struct shrinker inactive_shrinker;
+ struct notifier_block oom_notifier;
+ struct shrinker shrinker;
bool shrinker_no_lock_stealing;
/** LRU list of objects with fence regs on them. */
@@ -1025,6 +1098,9 @@
*/
bool busy;
+ /* the indicator for dispatch video commands on two BSD rings */
+ int bsd_ring_dispatch_index;
+
/** Bit 6 swizzling required for X tiling */
uint32_t bit_6_swizzle_x;
/** Bit 6 swizzling required for Y tiling */
@@ -1290,10 +1366,13 @@
*/
uint32_t gpio_mmio_base;
+ /* MMIO base address for MIPI regs */
+ uint32_t mipi_mmio_base;
+
wait_queue_head_t gmbus_wait_queue;
struct pci_dev *bridge_dev;
- struct intel_ring_buffer ring[I915_NUM_RINGS];
+ struct intel_engine_cs ring[I915_NUM_RINGS];
uint32_t last_seqno, next_seqno;
drm_dma_handle_t *status_page_dmah;
@@ -1380,6 +1459,9 @@
struct i915_gtt gtt; /* VM representing the global address space */
struct i915_gem_mm mm;
+#if defined(CONFIG_MMU_NOTIFIER)
+ DECLARE_HASHTABLE(mmu_notifiers, 7);
+#endif
/* Kernel Modesetting */
@@ -1448,6 +1530,7 @@
u32 suspend_count;
struct i915_suspend_saved_registers regfile;
+ struct vlv_s0ix_state vlv_s0ix_state;
struct {
/*
@@ -1473,8 +1556,11 @@
struct i915_dri1_state dri1;
/* Old ums support infrastructure, same warning applies. */
struct i915_ums_state ums;
- /* the indicator for dispatch video commands on two BSD rings */
- int ring_index;
+
+ /*
+ * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
+ * will be rejected. Instead look for a better place.
+ */
};
static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
@@ -1512,6 +1598,8 @@
*/
int (*get_pages)(struct drm_i915_gem_object *);
void (*put_pages)(struct drm_i915_gem_object *);
+ int (*dmabuf_export)(struct drm_i915_gem_object *);
+ void (*release)(struct drm_i915_gem_object *);
};
struct drm_i915_gem_object {
@@ -1602,7 +1690,7 @@
void *dma_buf_vmapping;
int vmapping_count;
- struct intel_ring_buffer *ring;
+ struct intel_engine_cs *ring;
/** Breadcrumb of last rendering to the buffer. */
uint32_t last_read_seqno;
@@ -1625,8 +1713,20 @@
/** for phy allocated objects */
struct drm_i915_gem_phys_object *phys_obj;
-};
+ union {
+ struct i915_gem_userptr {
+ uintptr_t ptr;
+ unsigned read_only :1;
+ unsigned workers :4;
+#define I915_GEM_USERPTR_MAX_WORKERS 15
+
+ struct mm_struct *mm;
+ struct i915_mmu_object *mn;
+ struct work_struct *work;
+ } userptr;
+ };
+};
#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
/**
@@ -1641,7 +1741,7 @@
*/
struct drm_i915_gem_request {
/** On Which ring this request was generated */
- struct intel_ring_buffer *ring;
+ struct intel_engine_cs *ring;
/** GEM sequence number associated with this request. */
uint32_t seqno;
@@ -1653,7 +1753,7 @@
u32 tail;
/** Context related to this request */
- struct i915_hw_context *ctx;
+ struct intel_context *ctx;
/** Batch buffer related to this request if any */
struct drm_i915_gem_object *batch_obj;
@@ -1680,9 +1780,8 @@
} mm;
struct idr context_idr;
- struct i915_hw_context *private_default_ctx;
atomic_t rps_wait_boost;
- struct intel_ring_buffer *bsd_ring;
+ struct intel_engine_cs *bsd_ring;
};
/*
@@ -1848,9 +1947,10 @@
#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
#define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6)
-#define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev))
-#define HAS_PPGTT(dev) (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev) \
- && !IS_BROADWELL(dev))
+#define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >= 6 && \
+ (!IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)))
+#define HAS_PPGTT(dev) (INTEL_INFO(dev)->gen >= 7 \
+ && !IS_GEN8(dev))
#define USES_PPGTT(dev) intel_enable_ppgtt(dev, false)
#define USES_FULL_PPGTT(dev) intel_enable_ppgtt(dev, true)
@@ -1889,7 +1989,7 @@
#define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg)
#define HAS_PSR(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev))
#define HAS_RUNTIME_PM(dev) (IS_GEN6(dev) || IS_HASWELL(dev) || \
- IS_BROADWELL(dev))
+ IS_BROADWELL(dev) || IS_VALLEYVIEW(dev))
#define INTEL_PCH_DEVICE_ID_MASK 0xff00
#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
@@ -2050,6 +2150,9 @@
struct drm_file *file_priv);
int i915_gem_get_tiling(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+int i915_gem_init_userptr(struct drm_device *dev);
+int i915_gem_userptr_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
@@ -2105,9 +2208,9 @@
int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
int i915_gem_object_sync(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *to);
+ struct intel_engine_cs *to);
void i915_vma_move_to_active(struct i915_vma *vma,
- struct intel_ring_buffer *ring);
+ struct intel_engine_cs *ring);
int i915_gem_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
@@ -2127,31 +2230,14 @@
int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
-static inline bool
-i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
-{
- if (obj->fence_reg != I915_FENCE_REG_NONE) {
- struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
- dev_priv->fence_regs[obj->fence_reg].pin_count++;
- return true;
- } else
- return false;
-}
-
-static inline void
-i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
-{
- if (obj->fence_reg != I915_FENCE_REG_NONE) {
- struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
- WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
- dev_priv->fence_regs[obj->fence_reg].pin_count--;
- }
-}
+bool i915_gem_object_pin_fence(struct drm_i915_gem_object *obj);
+void i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj);
struct drm_i915_gem_request *
-i915_gem_find_active_request(struct intel_ring_buffer *ring);
+i915_gem_find_active_request(struct intel_engine_cs *ring);
bool i915_gem_retire_requests(struct drm_device *dev);
+void i915_gem_retire_requests_ring(struct intel_engine_cs *ring);
int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
bool interruptible);
static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
@@ -2187,18 +2273,18 @@
int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
int __must_check i915_gem_init(struct drm_device *dev);
int __must_check i915_gem_init_hw(struct drm_device *dev);
-int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice);
+int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice);
void i915_gem_init_swizzling(struct drm_device *dev);
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
int __must_check i915_gpu_idle(struct drm_device *dev);
int __must_check i915_gem_suspend(struct drm_device *dev);
-int __i915_add_request(struct intel_ring_buffer *ring,
+int __i915_add_request(struct intel_engine_cs *ring,
struct drm_file *file,
struct drm_i915_gem_object *batch_obj,
u32 *seqno);
#define i915_add_request(ring, seqno) \
__i915_add_request(ring, NULL, NULL, seqno)
-int __must_check i915_wait_seqno(struct intel_ring_buffer *ring,
+int __must_check i915_wait_seqno(struct intel_engine_cs *ring,
uint32_t seqno);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
int __must_check
@@ -2209,7 +2295,7 @@
int __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
u32 alignment,
- struct intel_ring_buffer *pipelined);
+ struct intel_engine_cs *pipelined);
void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj);
int i915_gem_attach_phys_object(struct drm_device *dev,
struct drm_i915_gem_object *obj,
@@ -2311,22 +2397,22 @@
int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
int i915_gem_context_enable(struct drm_i915_private *dev_priv);
void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
-int i915_switch_context(struct intel_ring_buffer *ring,
- struct i915_hw_context *to);
-struct i915_hw_context *
+int i915_switch_context(struct intel_engine_cs *ring,
+ struct intel_context *to);
+struct intel_context *
i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
void i915_gem_context_free(struct kref *ctx_ref);
-static inline void i915_gem_context_reference(struct i915_hw_context *ctx)
+static inline void i915_gem_context_reference(struct intel_context *ctx)
{
kref_get(&ctx->ref);
}
-static inline void i915_gem_context_unreference(struct i915_hw_context *ctx)
+static inline void i915_gem_context_unreference(struct intel_context *ctx)
{
kref_put(&ctx->ref, i915_gem_context_free);
}
-static inline bool i915_gem_context_is_default(const struct i915_hw_context *c)
+static inline bool i915_gem_context_is_default(const struct intel_context *c)
{
return c->id == DEFAULT_CONTEXT_ID;
}
@@ -2336,6 +2422,8 @@
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
+/* i915_gem_render_state.c */
+int i915_gem_render_state_init(struct intel_engine_cs *ring);
/* i915_gem_evict.c */
int __must_check i915_gem_evict_something(struct drm_device *dev,
struct i915_address_space *vm,
@@ -2420,9 +2508,10 @@
/* i915_cmd_parser.c */
int i915_cmd_parser_get_version(void);
-void i915_cmd_parser_init_ring(struct intel_ring_buffer *ring);
-bool i915_needs_cmd_parser(struct intel_ring_buffer *ring);
-int i915_parse_cmds(struct intel_ring_buffer *ring,
+int i915_cmd_parser_init_ring(struct intel_engine_cs *ring);
+void i915_cmd_parser_fini_ring(struct intel_engine_cs *ring);
+bool i915_needs_cmd_parser(struct intel_engine_cs *ring);
+int i915_parse_cmds(struct intel_engine_cs *ring,
struct drm_i915_gem_object *batch_obj,
u32 batch_start_offset,
bool is_master);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index e1fa919..87e9b34 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -31,6 +31,7 @@
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
+#include <linux/oom.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
@@ -57,14 +58,15 @@
struct drm_i915_fence_reg *fence,
bool enable);
-static unsigned long i915_gem_inactive_count(struct shrinker *shrinker,
+static unsigned long i915_gem_shrinker_count(struct shrinker *shrinker,
struct shrink_control *sc);
-static unsigned long i915_gem_inactive_scan(struct shrinker *shrinker,
+static unsigned long i915_gem_shrinker_scan(struct shrinker *shrinker,
struct shrink_control *sc);
+static int i915_gem_shrinker_oom(struct notifier_block *nb,
+ unsigned long event,
+ void *ptr);
static unsigned long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
-static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
-static void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
static bool cpu_cache_is_coherent(struct drm_device *dev,
enum i915_cache_level level)
@@ -977,7 +979,7 @@
* equal.
*/
static int
-i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
+i915_gem_check_olr(struct intel_engine_cs *ring, u32 seqno)
{
int ret;
@@ -996,7 +998,7 @@
}
static bool missed_irq(struct drm_i915_private *dev_priv,
- struct intel_ring_buffer *ring)
+ struct intel_engine_cs *ring)
{
return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
}
@@ -1027,7 +1029,7 @@
 * Returns 0 if the seqno was found within the allotted time. Else returns the
* errno with remaining time filled in timeout argument.
*/
-static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
+static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
unsigned reset_counter,
bool interruptible,
struct timespec *timeout,
@@ -1134,7 +1136,7 @@
* request and object lists appropriately for that event.
*/
int
-i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
+i915_wait_seqno(struct intel_engine_cs *ring, uint32_t seqno)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1159,7 +1161,7 @@
static int
i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *ring)
+ struct intel_engine_cs *ring)
{
if (!obj->active)
return 0;
@@ -1184,7 +1186,7 @@
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
bool readonly)
{
- struct intel_ring_buffer *ring = obj->ring;
+ struct intel_engine_cs *ring = obj->ring;
u32 seqno;
int ret;
@@ -1209,7 +1211,7 @@
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring = obj->ring;
+ struct intel_engine_cs *ring = obj->ring;
unsigned reset_counter;
u32 seqno;
int ret;
@@ -1692,12 +1694,16 @@
return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
}
+static inline int
+i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
+{
+ return obj->madv == I915_MADV_DONTNEED;
+}
+
/* Immediately discard the backing storage */
static void
i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
- struct inode *inode;
-
i915_gem_object_free_mmap_offset(obj);
if (obj->base.filp == NULL)
@@ -1708,16 +1714,28 @@
* To do this we must instruct the shmfs to drop all of its
* backing pages, *now*.
*/
- inode = file_inode(obj->base.filp);
- shmem_truncate_range(inode, 0, (loff_t)-1);
-
+ shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
obj->madv = __I915_MADV_PURGED;
}
-static inline int
-i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
+/* Try to discard unwanted pages */
+static void
+i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
{
- return obj->madv == I915_MADV_DONTNEED;
+ struct address_space *mapping;
+
+ switch (obj->madv) {
+ case I915_MADV_DONTNEED:
+ i915_gem_object_truncate(obj);
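+ /* fall through - a freshly truncated object counts as purged */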
+ case __I915_MADV_PURGED:
+ return;
+ }
+
+ if (obj->base.filp == NULL)
+ return;
+
+ mapping = file_inode(obj->base.filp)->i_mapping;
+ invalidate_mapping_pages(mapping, 0, (loff_t)-1);
}
static void
@@ -1782,8 +1800,7 @@
ops->put_pages(obj);
obj->pages = NULL;
- if (i915_gem_object_is_purgeable(obj))
- i915_gem_object_truncate(obj);
+ i915_gem_object_invalidate(obj);
return 0;
}
@@ -1974,7 +1991,19 @@
page_cache_release(sg_page_iter_page(&sg_iter));
sg_free_table(st);
kfree(st);
- return PTR_ERR(page);
+
+ /* shmemfs first checks if there is enough memory to allocate the page
+ * and reports ENOSPC if there is insufficient memory, along with the usual
+ * ENOMEM for a genuine allocation failure.
+ *
+ * We use ENOSPC in our driver to mean that we have run out of aperture
+ * space and so want to translate the error from shmemfs back to our
+ * usual understanding of ENOMEM.
+ */
+ if (PTR_ERR(page) == -ENOSPC)
+ return -ENOMEM;
+ else
+ return PTR_ERR(page);
}
/* Ensure that the associated pages are gathered from the backing storage
@@ -2011,7 +2040,7 @@
static void
i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *ring)
+ struct intel_engine_cs *ring)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2049,7 +2078,7 @@
}
void i915_vma_move_to_active(struct i915_vma *vma,
- struct intel_ring_buffer *ring)
+ struct intel_engine_cs *ring)
{
list_move_tail(&vma->mm_list, &vma->vm->active_list);
return i915_gem_object_move_to_active(vma->obj, ring);
@@ -2090,7 +2119,7 @@
static void
i915_gem_object_retire(struct drm_i915_gem_object *obj)
{
- struct intel_ring_buffer *ring = obj->ring;
+ struct intel_engine_cs *ring = obj->ring;
if (ring == NULL)
return;
@@ -2104,7 +2133,7 @@
i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
+ struct intel_engine_cs *ring;
int ret, i, j;
/* Carefully retire all requests without writing to the rings */
@@ -2170,7 +2199,7 @@
return 0;
}
-int __i915_add_request(struct intel_ring_buffer *ring,
+int __i915_add_request(struct intel_engine_cs *ring,
struct drm_file *file,
struct drm_i915_gem_object *obj,
u32 *out_seqno)
@@ -2275,7 +2304,7 @@
}
static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
- const struct i915_hw_context *ctx)
+ const struct intel_context *ctx)
{
unsigned long elapsed;
@@ -2299,7 +2328,7 @@
}
static void i915_set_reset_status(struct drm_i915_private *dev_priv,
- struct i915_hw_context *ctx,
+ struct intel_context *ctx,
const bool guilty)
{
struct i915_ctx_hang_stats *hs;
@@ -2330,7 +2359,7 @@
}
struct drm_i915_gem_request *
-i915_gem_find_active_request(struct intel_ring_buffer *ring)
+i915_gem_find_active_request(struct intel_engine_cs *ring)
{
struct drm_i915_gem_request *request;
u32 completed_seqno;
@@ -2348,7 +2377,7 @@
}
static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
- struct intel_ring_buffer *ring)
+ struct intel_engine_cs *ring)
{
struct drm_i915_gem_request *request;
bool ring_hung;
@@ -2367,7 +2396,7 @@
}
static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
- struct intel_ring_buffer *ring)
+ struct intel_engine_cs *ring)
{
while (!list_empty(&ring->active_list)) {
struct drm_i915_gem_object *obj;
@@ -2426,7 +2455,7 @@
void i915_gem_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
+ struct intel_engine_cs *ring;
int i;
/*
@@ -2448,8 +2477,8 @@
/**
* This function clears the request list as sequence numbers are passed.
*/
-static void
-i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
+void
+i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
{
uint32_t seqno;
@@ -2494,7 +2523,7 @@
* of tail of the request to update the last known position
* of the GPU head.
*/
- ring->last_retired_head = request->tail;
+ ring->buffer->last_retired_head = request->tail;
i915_gem_free_request(request);
}
@@ -2512,7 +2541,7 @@
i915_gem_retire_requests(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
+ struct intel_engine_cs *ring;
bool idle = true;
int i;
@@ -2606,7 +2635,7 @@
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_wait *args = data;
struct drm_i915_gem_object *obj;
- struct intel_ring_buffer *ring = NULL;
+ struct intel_engine_cs *ring = NULL;
struct timespec timeout_stack, *timeout = NULL;
unsigned reset_counter;
u32 seqno = 0;
@@ -2677,9 +2706,9 @@
*/
int
i915_gem_object_sync(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *to)
+ struct intel_engine_cs *to)
{
- struct intel_ring_buffer *from = obj->ring;
+ struct intel_engine_cs *from = obj->ring;
u32 seqno;
int ret, idx;
@@ -2762,12 +2791,14 @@
* cause memory corruption through use-after-free.
*/
- i915_gem_object_finish_gtt(obj);
+ if (i915_is_ggtt(vma->vm)) {
+ i915_gem_object_finish_gtt(obj);
- /* release the fence reg _after_ flushing */
- ret = i915_gem_object_put_fence(obj);
- if (ret)
- return ret;
+ /* release the fence reg _after_ flushing */
+ ret = i915_gem_object_put_fence(obj);
+ if (ret)
+ return ret;
+ }
trace_i915_vma_unbind(vma);
@@ -2800,7 +2831,7 @@
int i915_gpu_idle(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
+ struct intel_engine_cs *ring;
int ret, i;
/* Flush everything onto the inactive list. */
@@ -3041,6 +3072,9 @@
fence = &dev_priv->fence_regs[obj->fence_reg];
+ if (WARN_ON(fence->pin_count))
+ return -EBUSY;
+
i915_gem_object_fence_lost(obj);
i915_gem_object_update_fence(obj, fence, false);
@@ -3637,6 +3671,15 @@
static bool is_pin_display(struct drm_i915_gem_object *obj)
{
+ struct i915_vma *vma;
+
+ if (list_empty(&obj->vma_list))
+ return false;
+
+ vma = i915_gem_obj_to_ggtt(obj);
+ if (!vma)
+ return false;
+
/* There are 3 sources that pin objects:
* 1. The display engine (scanouts, sprites, cursors);
* 2. Reservations for execbuffer;
@@ -3648,7 +3691,7 @@
* subtracting the potential reference by the user, any pin_count
* remains, it must be due to another use by the display engine.
*/
- return i915_gem_obj_to_ggtt(obj)->pin_count - !!obj->user_pin_count;
+ return vma->pin_count - !!obj->user_pin_count;
}
/*
@@ -3659,9 +3702,10 @@
int
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
u32 alignment,
- struct intel_ring_buffer *pipelined)
+ struct intel_engine_cs *pipelined)
{
u32 old_read_domains, old_write_domain;
+ bool was_pin_display;
int ret;
if (pipelined != obj->ring) {
@@ -3673,6 +3717,7 @@
/* Mark the pin_display early so that we account for the
* display coherency whilst setting up the cache domains.
*/
+ was_pin_display = obj->pin_display;
obj->pin_display = true;
/* The display engine is not coherent with the LLC cache on gen6. As
@@ -3715,7 +3760,8 @@
return 0;
err_unpin_display:
- obj->pin_display = is_pin_display(obj);
+ WARN_ON(was_pin_display != is_pin_display(obj));
+ obj->pin_display = was_pin_display;
return ret;
}
@@ -3812,7 +3858,7 @@
struct drm_i915_file_private *file_priv = file->driver_priv;
unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
struct drm_i915_gem_request *request;
- struct intel_ring_buffer *ring = NULL;
+ struct intel_engine_cs *ring = NULL;
unsigned reset_counter;
u32 seqno = 0;
int ret;
@@ -3852,9 +3898,13 @@
uint32_t alignment,
unsigned flags)
{
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
struct i915_vma *vma;
int ret;
+ if (WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base))
+ return -ENODEV;
+
if (WARN_ON(flags & (PIN_GLOBAL | PIN_MAPPABLE) && !i915_is_ggtt(vm)))
return -EINVAL;
@@ -3910,6 +3960,32 @@
obj->pin_mappable = false;
}
+bool
+i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
+{
+ if (obj->fence_reg != I915_FENCE_REG_NONE) {
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ struct i915_vma *ggtt_vma = i915_gem_obj_to_ggtt(obj);
+
+ WARN_ON(!ggtt_vma ||
+ dev_priv->fence_regs[obj->fence_reg].pin_count >
+ ggtt_vma->pin_count);
+ dev_priv->fence_regs[obj->fence_reg].pin_count++;
+ return true;
+ } else
+ return false;
+}
+
+void
+i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
+{
+ if (obj->fence_reg != I915_FENCE_REG_NONE) {
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
+ dev_priv->fence_regs[obj->fence_reg].pin_count--;
+ }
+}
+
int
i915_gem_pin_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
@@ -4170,6 +4246,30 @@
return obj;
}
+static bool discard_backing_storage(struct drm_i915_gem_object *obj)
+{
+ /* If we are the last user of the backing storage (be it shmemfs
+ * pages, stolen memory, etc.), we know that the pages are going to be
+ * immediately released. In this case, we can then skip copying
+ * back the contents from the GPU.
+ */
+
+ if (obj->madv != I915_MADV_WILLNEED)
+ return false;
+
+ if (obj->base.filp == NULL)
+ return true;
+
+ /* At first glance, this looks racy, but then again so would
+ * userspace racing mmap against close. However, the first external
+ * reference to the filp can only be obtained through the
+ * i915_gem_mmap_ioctl() which safeguards us against the user
+ * acquiring such a reference whilst we are in the middle of
+ * freeing the object.
+ */
+ return atomic_long_read(&obj->base.filp->f_count) == 1;
+}
+
void i915_gem_free_object(struct drm_gem_object *gem_obj)
{
struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
@@ -4208,6 +4308,8 @@
if (WARN_ON(obj->pages_pin_count))
obj->pages_pin_count = 0;
+ if (discard_backing_storage(obj))
+ obj->madv = I915_MADV_DONTNEED;
i915_gem_object_put_pages(obj);
i915_gem_object_free_mmap_offset(obj);
i915_gem_object_release_stolen(obj);
@@ -4217,6 +4319,9 @@
if (obj->base.import_attach)
drm_prime_gem_destroy(&obj->base, NULL);
+ if (obj->ops->release)
+ obj->ops->release(obj);
+
drm_gem_object_release(&obj->base);
i915_gem_info_remove_obj(dev_priv, obj->base.size);
@@ -4254,7 +4359,7 @@
i915_gem_stop_ringbuffers(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
+ struct intel_engine_cs *ring;
int i;
for_each_ring(ring, dev_priv, i)
@@ -4303,7 +4408,7 @@
return ret;
}
-int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice)
+int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4496,6 +4601,7 @@
DRM_DEBUG_DRIVER("allow wake ack timed out\n");
}
+ i915_gem_init_userptr(dev);
i915_gem_init_global_gtt(dev);
ret = i915_gem_context_init(dev);
@@ -4526,7 +4632,7 @@
i915_gem_cleanup_ringbuffer(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
+ struct intel_engine_cs *ring;
int i;
for_each_ring(ring, dev_priv, i)
@@ -4602,7 +4708,7 @@
}
static void
-init_ring_lists(struct intel_ring_buffer *ring)
+init_ring_lists(struct intel_engine_cs *ring)
{
INIT_LIST_HEAD(&ring->active_list);
INIT_LIST_HEAD(&ring->request_list);
@@ -4677,10 +4783,13 @@
dev_priv->mm.interruptible = true;
- dev_priv->mm.inactive_shrinker.scan_objects = i915_gem_inactive_scan;
- dev_priv->mm.inactive_shrinker.count_objects = i915_gem_inactive_count;
- dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
- register_shrinker(&dev_priv->mm.inactive_shrinker);
+ dev_priv->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
+ dev_priv->mm.shrinker.count_objects = i915_gem_shrinker_count;
+ dev_priv->mm.shrinker.seeks = DEFAULT_SEEKS;
+ register_shrinker(&dev_priv->mm.shrinker);
+
+ dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
+ register_oom_notifier(&dev_priv->mm.oom_notifier);
}
/*
@@ -4939,27 +5048,46 @@
#endif
}
-static unsigned long
-i915_gem_inactive_count(struct shrinker *shrinker, struct shrink_control *sc)
+static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
{
- struct drm_i915_private *dev_priv =
- container_of(shrinker,
- struct drm_i915_private,
- mm.inactive_shrinker);
- struct drm_device *dev = dev_priv->dev;
- struct drm_i915_gem_object *obj;
- bool unlock = true;
- unsigned long count;
-
if (!mutex_trylock(&dev->struct_mutex)) {
if (!mutex_is_locked_by(&dev->struct_mutex, current))
- return 0;
+ return false;
- if (dev_priv->mm.shrinker_no_lock_stealing)
- return 0;
+ if (to_i915(dev)->mm.shrinker_no_lock_stealing)
+ return false;
- unlock = false;
- }
+ *unlock = false;
+ } else
+ *unlock = true;
+
+ return true;
+}
+
+static int num_vma_bound(struct drm_i915_gem_object *obj)
+{
+ struct i915_vma *vma;
+ int count = 0;
+
+ list_for_each_entry(vma, &obj->vma_list, vma_link)
+ if (drm_mm_node_allocated(&vma->node))
+ count++;
+
+ return count;
+}
+
+static unsigned long
+i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(shrinker, struct drm_i915_private, mm.shrinker);
+ struct drm_device *dev = dev_priv->dev;
+ struct drm_i915_gem_object *obj;
+ unsigned long count;
+ bool unlock;
+
+ if (!i915_gem_shrinker_lock(dev, &unlock))
+ return 0;
count = 0;
list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
@@ -4967,10 +5095,8 @@
count += obj->base.size >> PAGE_SHIFT;
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
- if (obj->active)
- continue;
-
- if (!i915_gem_obj_is_pinned(obj) && obj->pages_pin_count == 0)
+ if (!i915_gem_obj_is_pinned(obj) &&
+ obj->pages_pin_count == num_vma_bound(obj))
count += obj->base.size >> PAGE_SHIFT;
}
@@ -5043,44 +5169,99 @@
}
static unsigned long
-i915_gem_inactive_scan(struct shrinker *shrinker, struct shrink_control *sc)
+i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
struct drm_i915_private *dev_priv =
- container_of(shrinker,
- struct drm_i915_private,
- mm.inactive_shrinker);
+ container_of(shrinker, struct drm_i915_private, mm.shrinker);
struct drm_device *dev = dev_priv->dev;
unsigned long freed;
- bool unlock = true;
+ bool unlock;
- if (!mutex_trylock(&dev->struct_mutex)) {
- if (!mutex_is_locked_by(&dev->struct_mutex, current))
- return SHRINK_STOP;
-
- if (dev_priv->mm.shrinker_no_lock_stealing)
- return SHRINK_STOP;
-
- unlock = false;
- }
+ if (!i915_gem_shrinker_lock(dev, &unlock))
+ return SHRINK_STOP;
freed = i915_gem_purge(dev_priv, sc->nr_to_scan);
if (freed < sc->nr_to_scan)
freed += __i915_gem_shrink(dev_priv,
sc->nr_to_scan - freed,
false);
- if (freed < sc->nr_to_scan)
- freed += i915_gem_shrink_all(dev_priv);
-
if (unlock)
mutex_unlock(&dev->struct_mutex);
return freed;
}
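For context, the renamed hooks follow the standard shrinker contract; a
minimal sketch with stub callbacks (my_count/my_scan are hypothetical):

#include <linux/shrinker.h>

/*
 * count_objects() reports how many pages could be freed; scan_objects()
 * frees up to sc->nr_to_scan of them and returns how many it actually
 * freed, or SHRINK_STOP when it cannot make progress (e.g. it failed
 * to take the lock protecting its lists, as above).
 */
static unsigned long my_count(struct shrinker *s, struct shrink_control *sc)
{
	return 0;			/* stub: nothing reclaimable */
}

static unsigned long my_scan(struct shrinker *s, struct shrink_control *sc)
{
	return SHRINK_STOP;		/* stub: could not take our lock */
}

static struct shrinker my_shrinker = {
	.count_objects = my_count,
	.scan_objects = my_scan,
	.seeks = DEFAULT_SEEKS,	/* registered via register_shrinker() */
};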
+static int
+i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(nb, struct drm_i915_private, mm.oom_notifier);
+ struct drm_device *dev = dev_priv->dev;
+ struct drm_i915_gem_object *obj;
+ unsigned long timeout = msecs_to_jiffies(5000) + 1;
+ unsigned long pinned, bound, unbound, freed;
+ bool was_interruptible;
+ bool unlock;
+
+ while (!i915_gem_shrinker_lock(dev, &unlock) && --timeout)
+ schedule_timeout_killable(1);
+ if (timeout == 0) {
+ pr_err("Unable to purge GPU memory due lock contention.\n");
+ return NOTIFY_DONE;
+ }
+
+ was_interruptible = dev_priv->mm.interruptible;
+ dev_priv->mm.interruptible = false;
+
+ freed = i915_gem_shrink_all(dev_priv);
+
+ dev_priv->mm.interruptible = was_interruptible;
+
+ /* Because we may be allocating inside our own driver, we cannot
+ * assert that there are no objects with pinned pages that are not
+ * being pointed to by hardware.
+ */
+ unbound = bound = pinned = 0;
+ list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
+ if (!obj->base.filp) /* not backed by a freeable object */
+ continue;
+
+ if (obj->pages_pin_count)
+ pinned += obj->base.size;
+ else
+ unbound += obj->base.size;
+ }
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+ if (!obj->base.filp)
+ continue;
+
+ if (obj->pages_pin_count)
+ pinned += obj->base.size;
+ else
+ bound += obj->base.size;
+ }
+
+ if (unlock)
+ mutex_unlock(&dev->struct_mutex);
+
+ pr_info("Purging GPU memory, %lu bytes freed, %lu bytes still pinned.\n",
+ freed, pinned);
+ if (unbound || bound)
+ pr_err("%lu and %lu bytes still available in the "
+ "bound and unbound GPU page lists.\n",
+ bound, unbound);
+
+ *(unsigned long *)ptr += freed;
+ return NOTIFY_DONE;
+}
+
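The OOM notifier above follows a small, reusable contract; a hedged sketch,
with my_reclaim_pages() as a hypothetical reclaim helper:

#include <linux/notifier.h>
#include <linux/oom.h>

static unsigned long my_reclaim_pages(void)
{
	return 0;			/* hypothetical reclaim helper */
}

/*
 * The OOM killer hands its progress counter in through @ptr; adding
 * the number of freed pages lets it judge whether the notifier chain
 * made enough progress to avoid killing a task.
 */
static int my_oom_notify(struct notifier_block *nb,
			 unsigned long event, void *ptr)
{
	*(unsigned long *)ptr += my_reclaim_pages();
	return NOTIFY_DONE;
}

static struct notifier_block my_oom_nb = {
	.notifier_call = my_oom_notify,	/* register_oom_notifier(&my_oom_nb) */
};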
struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
{
struct i915_vma *vma;
+ /* This WARN has probably outlived its usefulness (callers already
+ * WARN if they don't find the GGTT vma they expect). When removing,
+ * remember to remove the pre-check in is_pin_display() as well. */
if (WARN_ON(list_empty(&obj->vma_list)))
return NULL;
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index f77b4c12..3ffe308 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -178,7 +178,7 @@
void i915_gem_context_free(struct kref *ctx_ref)
{
- struct i915_hw_context *ctx = container_of(ctx_ref,
+ struct intel_context *ctx = container_of(ctx_ref,
typeof(*ctx), ref);
struct i915_hw_ppgtt *ppgtt = NULL;
@@ -199,7 +199,7 @@
}
static struct i915_hw_ppgtt *
-create_vm_for_ctx(struct drm_device *dev, struct i915_hw_context *ctx)
+create_vm_for_ctx(struct drm_device *dev, struct intel_context *ctx)
{
struct i915_hw_ppgtt *ppgtt;
int ret;
@@ -218,12 +218,12 @@
return ppgtt;
}
-static struct i915_hw_context *
+static struct intel_context *
__create_hw_context(struct drm_device *dev,
struct drm_i915_file_private *file_priv)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct i915_hw_context *ctx;
+ struct intel_context *ctx;
int ret;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
@@ -285,14 +285,14 @@
* context state of the GPU for applications that don't utilize HW contexts, as
* well as an idle case.
*/
-static struct i915_hw_context *
+static struct intel_context *
i915_gem_create_context(struct drm_device *dev,
struct drm_i915_file_private *file_priv,
bool create_vm)
{
const bool is_global_default_ctx = file_priv == NULL;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct i915_hw_context *ctx;
+ struct intel_context *ctx;
int ret = 0;
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -364,8 +364,8 @@
/* Prevent the hardware from restoring the last context (which hung) on
* the next switch */
for (i = 0; i < I915_NUM_RINGS; i++) {
- struct intel_ring_buffer *ring = &dev_priv->ring[i];
- struct i915_hw_context *dctx = ring->default_context;
+ struct intel_engine_cs *ring = &dev_priv->ring[i];
+ struct intel_context *dctx = ring->default_context;
/* Do a fake switch to the default context */
if (ring->last_context == dctx)
@@ -391,7 +391,7 @@
int i915_gem_context_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct i915_hw_context *ctx;
+ struct intel_context *ctx;
int i;
/* Init should only be called once per module load. Eventually the
@@ -426,7 +426,7 @@
void i915_gem_context_fini(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct i915_hw_context *dctx = dev_priv->ring[RCS].default_context;
+ struct intel_context *dctx = dev_priv->ring[RCS].default_context;
int i;
if (dctx->obj) {
@@ -449,10 +449,12 @@
i915_gem_context_unreference(dctx);
dev_priv->ring[RCS].last_context = NULL;
}
+
+ i915_gem_object_ggtt_unpin(dctx->obj);
}
for (i = 0; i < I915_NUM_RINGS; i++) {
- struct intel_ring_buffer *ring = &dev_priv->ring[i];
+ struct intel_engine_cs *ring = &dev_priv->ring[i];
if (ring->last_context)
i915_gem_context_unreference(ring->last_context);
@@ -461,13 +463,12 @@
ring->last_context = NULL;
}
- i915_gem_object_ggtt_unpin(dctx->obj);
i915_gem_context_unreference(dctx);
}
int i915_gem_context_enable(struct drm_i915_private *dev_priv)
{
- struct intel_ring_buffer *ring;
+ struct intel_engine_cs *ring;
int ret, i;
/* This is the only place the aliasing PPGTT gets enabled, which means
@@ -494,11 +495,7 @@
static int context_idr_cleanup(int id, void *p, void *data)
{
- struct i915_hw_context *ctx = p;
-
- /* Ignore the default context because close will handle it */
- if (i915_gem_context_is_default(ctx))
- return 0;
+ struct intel_context *ctx = p;
i915_gem_context_unreference(ctx);
return 0;
@@ -507,17 +504,17 @@
int i915_gem_context_open(struct drm_device *dev, struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
+ struct intel_context *ctx;
idr_init(&file_priv->context_idr);
mutex_lock(&dev->struct_mutex);
- file_priv->private_default_ctx =
- i915_gem_create_context(dev, file_priv, USES_FULL_PPGTT(dev));
+ ctx = i915_gem_create_context(dev, file_priv, USES_FULL_PPGTT(dev));
mutex_unlock(&dev->struct_mutex);
- if (IS_ERR(file_priv->private_default_ctx)) {
+ if (IS_ERR(ctx)) {
idr_destroy(&file_priv->context_idr);
- return PTR_ERR(file_priv->private_default_ctx);
+ return PTR_ERR(ctx);
}
return 0;
@@ -529,16 +526,14 @@
idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
idr_destroy(&file_priv->context_idr);
-
- i915_gem_context_unreference(file_priv->private_default_ctx);
}
-struct i915_hw_context *
+struct intel_context *
i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
{
- struct i915_hw_context *ctx;
+ struct intel_context *ctx;
- ctx = (struct i915_hw_context *)idr_find(&file_priv->context_idr, id);
+ ctx = (struct intel_context *)idr_find(&file_priv->context_idr, id);
if (!ctx)
return ERR_PTR(-ENOENT);
@@ -546,8 +541,8 @@
}
static inline int
-mi_set_context(struct intel_ring_buffer *ring,
- struct i915_hw_context *new_context,
+mi_set_context(struct intel_engine_cs *ring,
+ struct intel_context *new_context,
u32 hw_flags)
{
int ret;
@@ -567,7 +562,7 @@
if (ret)
return ret;
- /* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw */
+ /* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
if (INTEL_INFO(ring->dev)->gen >= 7)
intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
else
@@ -596,11 +591,11 @@
return ret;
}
-static int do_switch(struct intel_ring_buffer *ring,
- struct i915_hw_context *to)
+static int do_switch(struct intel_engine_cs *ring,
+ struct intel_context *to)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
- struct i915_hw_context *from = ring->last_context;
+ struct intel_context *from = ring->last_context;
struct i915_hw_ppgtt *ppgtt = ctx_to_ppgtt(to);
u32 hw_flags = 0;
int ret, i;
@@ -701,13 +696,19 @@
i915_gem_context_unreference(from);
}
- to->is_initialized = true;
-
done:
i915_gem_context_reference(to);
ring->last_context = to;
to->last_ring = ring;
+ if (ring->id == RCS && !to->is_initialized && from == NULL) {
+ ret = i915_gem_render_state_init(ring);
+ if (ret)
+ DRM_ERROR("init render state: %d\n", ret);
+ }
+
+ to->is_initialized = true;
+
return 0;
unpin_out:
@@ -726,8 +727,8 @@
 * it will have a refcount > 1. This allows us to destroy the context abstract
* object while letting the normal object tracking destroy the backing BO.
*/
-int i915_switch_context(struct intel_ring_buffer *ring,
- struct i915_hw_context *to)
+int i915_switch_context(struct intel_engine_cs *ring,
+ struct intel_context *to)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
@@ -756,7 +757,7 @@
{
struct drm_i915_gem_context_create *args = data;
struct drm_i915_file_private *file_priv = file->driver_priv;
- struct i915_hw_context *ctx;
+ struct intel_context *ctx;
int ret;
if (!hw_context_enabled(dev))
@@ -782,7 +783,7 @@
{
struct drm_i915_gem_context_destroy *args = data;
struct drm_i915_file_private *file_priv = file->driver_priv;
- struct i915_hw_context *ctx;
+ struct intel_context *ctx;
int ret;
if (args->ctx_id == DEFAULT_CONTEXT_ID)
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 321102a..580aa42 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -229,6 +229,14 @@
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
struct drm_gem_object *gem_obj, int flags)
{
+ struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
+
+ if (obj->ops->dmabuf_export) {
+ int ret = obj->ops->dmabuf_export(obj);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
return dma_buf_export(gem_obj, &i915_dmabuf_ops, gem_obj->size, flags);
}
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 47fe8ec..008e208 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -541,7 +541,7 @@
static int
i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
- struct intel_ring_buffer *ring,
+ struct intel_engine_cs *ring,
bool *need_reloc)
{
struct drm_i915_gem_object *obj = vma->obj;
@@ -596,7 +596,7 @@
}
static int
-i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
+i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
struct list_head *vmas,
bool *need_relocs)
{
@@ -610,6 +610,8 @@
if (list_empty(vmas))
return 0;
+ i915_gem_retire_requests_ring(ring);
+
vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;
INIT_LIST_HEAD(&ordered_vmas);
@@ -711,7 +713,7 @@
i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
struct drm_i915_gem_execbuffer2 *args,
struct drm_file *file,
- struct intel_ring_buffer *ring,
+ struct intel_engine_cs *ring,
struct eb_vmas *eb,
struct drm_i915_gem_exec_object2 *exec)
{
@@ -827,7 +829,7 @@
}
static int
-i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
+i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
struct list_head *vmas)
{
struct i915_vma *vma;
@@ -910,11 +912,11 @@
return 0;
}
-static struct i915_hw_context *
+static struct intel_context *
i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
- struct intel_ring_buffer *ring, const u32 ctx_id)
+ struct intel_engine_cs *ring, const u32 ctx_id)
{
- struct i915_hw_context *ctx = NULL;
+ struct intel_context *ctx = NULL;
struct i915_ctx_hang_stats *hs;
if (ring->id != RCS && ctx_id != DEFAULT_CONTEXT_ID)
@@ -935,7 +937,7 @@
static void
i915_gem_execbuffer_move_to_active(struct list_head *vmas,
- struct intel_ring_buffer *ring)
+ struct intel_engine_cs *ring)
{
struct i915_vma *vma;
@@ -970,7 +972,7 @@
static void
i915_gem_execbuffer_retire_commands(struct drm_device *dev,
struct drm_file *file,
- struct intel_ring_buffer *ring,
+ struct intel_engine_cs *ring,
struct drm_i915_gem_object *obj)
{
/* Unconditionally force add_request to emit a full flush. */
@@ -982,7 +984,7 @@
static int
i915_reset_gen7_sol_offsets(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+ struct intel_engine_cs *ring)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int ret, i;
@@ -1025,12 +1027,12 @@
int ring_id;
mutex_lock(&dev->struct_mutex);
- if (dev_priv->ring_index == 0) {
+ if (dev_priv->mm.bsd_ring_dispatch_index == 0) {
ring_id = VCS;
- dev_priv->ring_index = 1;
+ dev_priv->mm.bsd_ring_dispatch_index = 1;
} else {
ring_id = VCS2;
- dev_priv->ring_index = 0;
+ dev_priv->mm.bsd_ring_dispatch_index = 0;
}
file_priv->bsd_ring = &dev_priv->ring[ring_id];
mutex_unlock(&dev->struct_mutex);
@@ -1048,8 +1050,8 @@
struct eb_vmas *eb;
struct drm_i915_gem_object *batch_obj;
struct drm_clip_rect *cliprects = NULL;
- struct intel_ring_buffer *ring;
- struct i915_hw_context *ctx;
+ struct intel_engine_cs *ring;
+ struct intel_context *ctx;
struct i915_address_space *vm;
const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
u64 exec_start = args->batch_start_offset, exec_len;
@@ -1168,6 +1170,11 @@
goto pre_mutex_err;
}
} else {
+ if (args->DR4 == 0xffffffff) {
+ DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
+ args->DR4 = 0;
+ }
+
if (args->DR1 || args->DR4 || args->cliprects_ptr) {
DRM_DEBUG("0 cliprects but dirt in cliprects fields\n");
return -EINVAL;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 4969162..9491636 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -30,7 +30,8 @@
#include "i915_trace.h"
#include "intel_drv.h"
-static void gen8_setup_private_ppat(struct drm_i915_private *dev_priv);
+static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv);
+static void chv_setup_private_ppat(struct drm_i915_private *dev_priv);
bool intel_enable_ppgtt(struct drm_device *dev, bool full)
{
@@ -196,7 +197,7 @@
}
/* Broadwell Page Directory Pointer Descriptors */
-static int gen8_write_pdp(struct intel_ring_buffer *ring, unsigned entry,
+static int gen8_write_pdp(struct intel_engine_cs *ring, unsigned entry,
uint64_t val, bool synchronous)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
@@ -226,7 +227,7 @@
}
static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt,
- struct intel_ring_buffer *ring,
+ struct intel_engine_cs *ring,
bool synchronous)
{
int i, ret;
@@ -275,6 +276,8 @@
num_entries--;
}
+ if (!HAS_LLC(ppgtt->base.dev))
+ drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);
kunmap_atomic(pt_vaddr);
pte = 0;
@@ -311,6 +314,8 @@
gen8_pte_encode(sg_page_iter_dma_address(&sg_iter),
cache_level, true);
if (++pte == GEN8_PTES_PER_PAGE) {
+ if (!HAS_LLC(ppgtt->base.dev))
+ drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);
kunmap_atomic(pt_vaddr);
pt_vaddr = NULL;
if (++pde == GEN8_PDES_PER_PAGE) {
@@ -320,8 +325,11 @@
pte = 0;
}
}
- if (pt_vaddr)
+ if (pt_vaddr) {
+ if (!HAS_LLC(ppgtt->base.dev))
+ drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);
kunmap_atomic(pt_vaddr);
+ }
}
static void gen8_free_page_tables(struct page **pt_pages)
@@ -584,6 +592,8 @@
pd_vaddr[j] = gen8_pde_encode(ppgtt->base.dev, addr,
I915_CACHE_LLC);
}
+ if (!HAS_LLC(ppgtt->base.dev))
+ drm_clflush_virt_range(pd_vaddr, PAGE_SIZE);
kunmap_atomic(pd_vaddr);
}
@@ -696,7 +706,7 @@
}
static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
- struct intel_ring_buffer *ring,
+ struct intel_engine_cs *ring,
bool synchronous)
{
struct drm_device *dev = ppgtt->base.dev;
@@ -740,7 +750,7 @@
}
static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
- struct intel_ring_buffer *ring,
+ struct intel_engine_cs *ring,
bool synchronous)
{
struct drm_device *dev = ppgtt->base.dev;
@@ -791,7 +801,7 @@
}
static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
- struct intel_ring_buffer *ring,
+ struct intel_engine_cs *ring,
bool synchronous)
{
struct drm_device *dev = ppgtt->base.dev;
@@ -812,7 +822,7 @@
{
struct drm_device *dev = ppgtt->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
+ struct intel_engine_cs *ring;
int j, ret;
for_each_ring(ring, dev_priv, j) {
@@ -842,7 +852,7 @@
{
struct drm_device *dev = ppgtt->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
+ struct intel_engine_cs *ring;
uint32_t ecochk, ecobits;
int i;
@@ -881,7 +891,7 @@
{
struct drm_device *dev = ppgtt->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
+ struct intel_engine_cs *ring;
uint32_t ecochk, gab_ctl, ecobits;
int i;
@@ -1025,8 +1035,7 @@
&ppgtt->node, GEN6_PD_SIZE,
GEN6_PD_ALIGN, 0,
0, dev_priv->gtt.base.total,
- DRM_MM_SEARCH_DEFAULT,
- DRM_MM_CREATE_DEFAULT);
+ DRM_MM_TOPDOWN);
if (ret == -ENOSPC && !retried) {
ret = i915_gem_evict_something(dev, &dev_priv->gtt.base,
GEN6_PD_SIZE, GEN6_PD_ALIGN,
@@ -1250,7 +1259,7 @@
void i915_check_and_clear_faults(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
+ struct intel_engine_cs *ring;
int i;
if (INTEL_INFO(dev)->gen < 6)
@@ -1325,7 +1334,11 @@
if (INTEL_INFO(dev)->gen >= 8) {
- gen8_setup_private_ppat(dev_priv);
+ if (IS_CHERRYVIEW(dev))
+ chv_setup_private_ppat(dev_priv);
+ else
+ bdw_setup_private_ppat(dev_priv);
+
return;
}
@@ -1753,6 +1766,17 @@
return bdw_gmch_ctl << 20;
}
+static inline unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
+{
+ gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
+ gmch_ctrl &= SNB_GMCH_GGMS_MASK;
+
+ if (gmch_ctrl)
+ return 1 << (20 + gmch_ctrl);
+
+ return 0;
+}
+
static inline size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
{
snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
@@ -1767,6 +1791,24 @@
return bdw_gmch_ctl << 25; /* 32 MB units */
}
+static size_t chv_get_stolen_size(u16 gmch_ctrl)
+{
+ gmch_ctrl >>= SNB_GMCH_GMS_SHIFT;
+ gmch_ctrl &= SNB_GMCH_GMS_MASK;
+
+ /*
+ * 0x0 to 0x10: 32MB increments starting at 0MB
+ * 0x11 to 0x16: 4MB increments starting at 8MB
+ * 0x17 to 0x1d: 4MB increments starting at 36MB
+ */
+ if (gmch_ctrl < 0x11)
+ return gmch_ctrl << 25;
+ else if (gmch_ctrl < 0x17)
+ return (gmch_ctrl - 0x11 + 2) << 22;
+ else
+ return (gmch_ctrl - 0x17 + 9) << 22;
+}
+
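Spot-checking the decode table in the comment above with a standalone
userspace replica of the helper (same arithmetic, not the kernel code):

#include <stdio.h>

static unsigned long chv_stolen(unsigned int gmch_ctrl)
{
	/* gmch_ctrl is assumed already shifted and masked, as above */
	if (gmch_ctrl < 0x11)
		return (unsigned long)gmch_ctrl << 25;
	else if (gmch_ctrl < 0x17)
		return (unsigned long)(gmch_ctrl - 0x11 + 2) << 22;
	else
		return (unsigned long)(gmch_ctrl - 0x17 + 9) << 22;
}

int main(void)
{
	/* prints "512 8 28 36 60" (MB) for 0x10, 0x11, 0x16, 0x17, 0x1d */
	printf("%lu %lu %lu %lu %lu\n",
	       chv_stolen(0x10) >> 20, chv_stolen(0x11) >> 20,
	       chv_stolen(0x16) >> 20, chv_stolen(0x17) >> 20,
	       chv_stolen(0x1d) >> 20);
	return 0;
}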
static int ggtt_probe_common(struct drm_device *dev,
size_t gtt_size)
{
@@ -1797,7 +1839,7 @@
/* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
* bits. When using advanced contexts each context stores its own PAT, but
* writing this data shouldn't be harmful even in those cases. */
-static void gen8_setup_private_ppat(struct drm_i915_private *dev_priv)
+static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv)
{
uint64_t pat;
@@ -1816,6 +1858,33 @@
I915_WRITE(GEN8_PRIVATE_PAT + 4, pat >> 32);
}
+static void chv_setup_private_ppat(struct drm_i915_private *dev_priv)
+{
+ uint64_t pat;
+
+ /*
+ * Map WB on BDW to snooped on CHV.
+ *
+ * Only the snoop bit has meaning for CHV, the rest is
+ * ignored.
+ *
+ * Note that the hardware enforces snooping for all page
+ * table accesses. The snoop bit is actually ignored for
+ * PDEs.
+ */
+ pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
+ GEN8_PPAT(1, 0) |
+ GEN8_PPAT(2, 0) |
+ GEN8_PPAT(3, 0) |
+ GEN8_PPAT(4, CHV_PPAT_SNOOP) |
+ GEN8_PPAT(5, CHV_PPAT_SNOOP) |
+ GEN8_PPAT(6, CHV_PPAT_SNOOP) |
+ GEN8_PPAT(7, CHV_PPAT_SNOOP);
+
+ I915_WRITE(GEN8_PRIVATE_PAT, pat);
+ I915_WRITE(GEN8_PRIVATE_PAT + 4, pat >> 32);
+}
+
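Assuming the usual one-byte-per-entry GEN8_PPAT(index, value) packing (the
macro definition is not part of this diff), the CHV table above resolves to
two easily checked dwords:

#include <stdint.h>
#include <stdio.h>

#define CHV_PPAT_SNOOP	(1 << 6)
/* assumed layout: one byte per PPAT entry, entry 0 in the low byte */
#define GEN8_PPAT(i, x)	((uint64_t)(x) << ((i) * 8))

int main(void)
{
	uint64_t pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
		       GEN8_PPAT(4, CHV_PPAT_SNOOP) |
		       GEN8_PPAT(5, CHV_PPAT_SNOOP) |
		       GEN8_PPAT(6, CHV_PPAT_SNOOP) |
		       GEN8_PPAT(7, CHV_PPAT_SNOOP);

	/* prints "00000040 40404040", the two dwords written above */
	printf("%08x %08x\n", (uint32_t)pat, (uint32_t)(pat >> 32));
	return 0;
}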
static int gen8_gmch_probe(struct drm_device *dev,
size_t *gtt_total,
size_t *stolen,
@@ -1836,12 +1905,20 @@
pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
- *stolen = gen8_get_stolen_size(snb_gmch_ctl);
+ if (IS_CHERRYVIEW(dev)) {
+ *stolen = chv_get_stolen_size(snb_gmch_ctl);
+ gtt_size = chv_get_total_gtt_size(snb_gmch_ctl);
+ } else {
+ *stolen = gen8_get_stolen_size(snb_gmch_ctl);
+ gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl);
+ }
- gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl);
*gtt_total = (gtt_size / sizeof(gen8_gtt_pte_t)) << PAGE_SHIFT;
- gen8_setup_private_ppat(dev_priv);
+ if (IS_CHERRYVIEW(dev))
+ chv_setup_private_ppat(dev_priv);
+ else
+ bdw_setup_private_ppat(dev_priv);
ret = ggtt_probe_common(dev, gtt_size);
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index b5e8ac0..1b96a06 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -95,6 +95,7 @@
#define PPAT_CACHED_INDEX _PAGE_PAT /* WB LLCeLLC */
#define PPAT_DISPLAY_ELLC_INDEX _PAGE_PCD /* WT eLLC */
+#define CHV_PPAT_SNOOP (1<<6)
#define GEN8_PPAT_AGE(x) (x<<4)
#define GEN8_PPAT_LLCeLLC (3<<2)
#define GEN8_PPAT_LLCELLC (2<<2)
@@ -256,11 +257,11 @@
dma_addr_t *gen8_pt_dma_addr[4];
};
- struct i915_hw_context *ctx;
+ struct intel_context *ctx;
int (*enable)(struct i915_hw_ppgtt *ppgtt);
int (*switch_mm)(struct i915_hw_ppgtt *ppgtt,
- struct intel_ring_buffer *ring,
+ struct intel_engine_cs *ring,
bool synchronous);
void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
};
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
new file mode 100644
index 0000000..3521f99
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -0,0 +1,198 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Mika Kuoppala <mika.kuoppala@intel.com>
+ *
+ */
+
+#include "i915_drv.h"
+#include "intel_renderstate.h"
+
+struct i915_render_state {
+ struct drm_i915_gem_object *obj;
+ unsigned long ggtt_offset;
+ void *batch;
+ u32 size;
+ u32 len;
+};
+
+static struct i915_render_state *render_state_alloc(struct drm_device *dev)
+{
+ struct i915_render_state *so;
+ struct page *page;
+ int ret;
+
+ so = kzalloc(sizeof(*so), GFP_KERNEL);
+ if (!so)
+ return ERR_PTR(-ENOMEM);
+
+ so->obj = i915_gem_alloc_object(dev, 4096);
+ if (so->obj == NULL) {
+ ret = -ENOMEM;
+ goto free;
+ }
+ so->size = 4096;
+
+ ret = i915_gem_obj_ggtt_pin(so->obj, 4096, 0);
+ if (ret)
+ goto free_gem;
+
+ BUG_ON(so->obj->pages->nents != 1);
+ page = sg_page(so->obj->pages->sgl);
+
+ so->batch = kmap(page);
+ if (!so->batch) {
+ ret = -ENOMEM;
+ goto unpin;
+ }
+
+ so->ggtt_offset = i915_gem_obj_ggtt_offset(so->obj);
+
+ return so;
+unpin:
+ i915_gem_object_ggtt_unpin(so->obj);
+free_gem:
+ drm_gem_object_unreference(&so->obj->base);
+free:
+ kfree(so);
+ return ERR_PTR(ret);
+}
+
+static void render_state_free(struct i915_render_state *so)
+{
+ kunmap(so->batch);
+ i915_gem_object_ggtt_unpin(so->obj);
+ drm_gem_object_unreference(&so->obj->base);
+ kfree(so);
+}
+
+static const struct intel_renderstate_rodata *
+render_state_get_rodata(struct drm_device *dev, const int gen)
+{
+ switch (gen) {
+ case 6:
+ return &gen6_null_state;
+ case 7:
+ return &gen7_null_state;
+ case 8:
+ return &gen8_null_state;
+ }
+
+ return NULL;
+}
+
+static int render_state_setup(const int gen,
+ const struct intel_renderstate_rodata *rodata,
+ struct i915_render_state *so)
+{
+ const u64 goffset = i915_gem_obj_ggtt_offset(so->obj);
+ u32 reloc_index = 0;
+ u32 * const d = so->batch;
+ unsigned int i = 0;
+ int ret;
+
+ if (!rodata || rodata->batch_items * 4 > so->size)
+ return -EINVAL;
+
+ ret = i915_gem_object_set_to_cpu_domain(so->obj, true);
+ if (ret)
+ return ret;
+
+ while (i < rodata->batch_items) {
+ u32 s = rodata->batch[i];
+
+ if (reloc_index < rodata->reloc_items &&
+ i * 4 == rodata->reloc[reloc_index]) {
+
+ s += goffset & 0xffffffff;
+
+ /* We keep batch offsets to a maximum of 32 bits */
+ if (gen >= 8) {
+ if (i + 1 >= rodata->batch_items ||
+ rodata->batch[i + 1] != 0)
+ return -EINVAL;
+
+ d[i] = s;
+ i++;
+ s = (goffset & 0xffffffff00000000ull) >> 32;
+ }
+
+ reloc_index++;
+ }
+
+ d[i] = s;
+ i++;
+ }
+
+ ret = i915_gem_object_set_to_gtt_domain(so->obj, false);
+ if (ret)
+ return ret;
+
+ if (rodata->reloc_items != reloc_index) {
+ DRM_ERROR("not all relocs resolved, %d out of %d\n",
+ reloc_index, rodata->reloc_items);
+ return -EINVAL;
+ }
+
+ so->len = rodata->batch_items * 4;
+
+ return 0;
+}
+
+int i915_gem_render_state_init(struct intel_engine_cs *ring)
+{
+ const int gen = INTEL_INFO(ring->dev)->gen;
+ struct i915_render_state *so;
+ const struct intel_renderstate_rodata *rodata;
+ int ret;
+
+ if (WARN_ON(ring->id != RCS))
+ return -ENOENT;
+
+ rodata = render_state_get_rodata(ring->dev, gen);
+ if (rodata == NULL)
+ return 0;
+
+ so = render_state_alloc(ring->dev);
+ if (IS_ERR(so))
+ return PTR_ERR(so);
+
+ ret = render_state_setup(gen, rodata, so);
+ if (ret)
+ goto out;
+
+ ret = ring->dispatch_execbuffer(ring,
+ i915_gem_obj_ggtt_offset(so->obj),
+ so->len,
+ I915_DISPATCH_SECURE);
+ if (ret)
+ goto out;
+
+ i915_vma_move_to_active(i915_gem_obj_to_ggtt(so->obj), ring);
+
+ ret = __i915_add_request(ring, NULL, so->obj, NULL);
+ /* __i915_add_request moves object to inactive if it fails */
+out:
+ render_state_free(so);
+ return ret;
+}
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
new file mode 100644
index 0000000..21ea928
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -0,0 +1,711 @@
+/*
+ * Copyright © 2012-2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+#include "i915_trace.h"
+#include "intel_drv.h"
+#include <linux/mmu_context.h>
+#include <linux/mmu_notifier.h>
+#include <linux/mempolicy.h>
+#include <linux/swap.h>
+
+#if defined(CONFIG_MMU_NOTIFIER)
+#include <linux/interval_tree.h>
+
+struct i915_mmu_notifier {
+ spinlock_t lock;
+ struct hlist_node node;
+ struct mmu_notifier mn;
+ struct rb_root objects;
+ struct drm_device *dev;
+ struct mm_struct *mm;
+ struct work_struct work;
+ unsigned long count;
+ unsigned long serial;
+};
+
+struct i915_mmu_object {
+ struct i915_mmu_notifier *mmu;
+ struct interval_tree_node it;
+ struct drm_i915_gem_object *obj;
+};
+
+static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
+ struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end)
+{
+ struct i915_mmu_notifier *mn = container_of(_mn, struct i915_mmu_notifier, mn);
+ struct interval_tree_node *it = NULL;
+ unsigned long serial = 0;
+
+ end--; /* interval ranges are inclusive, but invalidate range is exclusive */
+ while (start < end) {
+ struct drm_i915_gem_object *obj;
+
+ obj = NULL;
+ spin_lock(&mn->lock);
+ if (serial == mn->serial)
+ it = interval_tree_iter_next(it, start, end);
+ else
+ it = interval_tree_iter_first(&mn->objects, start, end);
+ if (it != NULL) {
+ obj = container_of(it, struct i915_mmu_object, it)->obj;
+ drm_gem_object_reference(&obj->base);
+ serial = mn->serial;
+ }
+ spin_unlock(&mn->lock);
+ if (obj == NULL)
+ return;
+
+ mutex_lock(&mn->dev->struct_mutex);
+ /* Cancel any active worker and force us to re-evaluate gup */
+ obj->userptr.work = NULL;
+
+ if (obj->pages != NULL) {
+ struct drm_i915_private *dev_priv = to_i915(mn->dev);
+ struct i915_vma *vma, *tmp;
+ bool was_interruptible;
+
+ was_interruptible = dev_priv->mm.interruptible;
+ dev_priv->mm.interruptible = false;
+
+ list_for_each_entry_safe(vma, tmp, &obj->vma_list, vma_link) {
+ int ret = i915_vma_unbind(vma);
+ WARN_ON(ret && ret != -EIO);
+ }
+ WARN_ON(i915_gem_object_put_pages(obj));
+
+ dev_priv->mm.interruptible = was_interruptible;
+ }
+
+ start = obj->userptr.ptr + obj->base.size;
+
+ drm_gem_object_unreference(&obj->base);
+ mutex_unlock(&mn->dev->struct_mutex);
+ }
+}
+
+static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
+ .invalidate_range_start = i915_gem_userptr_mn_invalidate_range_start,
+};
+
+static struct i915_mmu_notifier *
+__i915_mmu_notifier_lookup(struct drm_device *dev, struct mm_struct *mm)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_mmu_notifier *mmu;
+
+ /* Protected by dev->struct_mutex */
+ hash_for_each_possible(dev_priv->mmu_notifiers, mmu, node, (unsigned long)mm)
+ if (mmu->mm == mm)
+ return mmu;
+
+ return NULL;
+}
+
+static struct i915_mmu_notifier *
+i915_mmu_notifier_get(struct drm_device *dev, struct mm_struct *mm)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_mmu_notifier *mmu;
+ int ret;
+
+ lockdep_assert_held(&dev->struct_mutex);
+
+ mmu = __i915_mmu_notifier_lookup(dev, mm);
+ if (mmu)
+ return mmu;
+
+ mmu = kmalloc(sizeof(*mmu), GFP_KERNEL);
+ if (mmu == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ spin_lock_init(&mmu->lock);
+ mmu->dev = dev;
+ mmu->mn.ops = &i915_gem_userptr_notifier;
+ mmu->mm = mm;
+ mmu->objects = RB_ROOT;
+ mmu->count = 0;
+ mmu->serial = 0;
+
+ /* Protected by mmap_sem (write-lock) */
+ ret = __mmu_notifier_register(&mmu->mn, mm);
+ if (ret) {
+ kfree(mmu);
+ return ERR_PTR(ret);
+ }
+
+ /* Protected by dev->struct_mutex */
+ hash_add(dev_priv->mmu_notifiers, &mmu->node, (unsigned long)mm);
+ return mmu;
+}
+
+static void
+__i915_mmu_notifier_destroy_worker(struct work_struct *work)
+{
+ struct i915_mmu_notifier *mmu = container_of(work, typeof(*mmu), work);
+ mmu_notifier_unregister(&mmu->mn, mmu->mm);
+ kfree(mmu);
+}
+
+static void
+__i915_mmu_notifier_destroy(struct i915_mmu_notifier *mmu)
+{
+ lockdep_assert_held(&mmu->dev->struct_mutex);
+
+ /* Protected by dev->struct_mutex */
+ hash_del(&mmu->node);
+
+ /* Our lock ordering is: mmap_sem, mmu_notifier_srcu, struct_mutex.
+ * We enter the function holding struct_mutex, therefore we need
+ * to drop our mutex prior to calling mmu_notifier_unregister in
+ * order to prevent lock inversion (and system-wide deadlock)
+ * between the mmap_sem and struct_mutex. Hence we defer the
+ * unregistration to a workqueue where we hold no locks.
+ */
+ INIT_WORK(&mmu->work, __i915_mmu_notifier_destroy_worker);
+ schedule_work(&mmu->work);
+}
+
+static void __i915_mmu_notifier_update_serial(struct i915_mmu_notifier *mmu)
+{
+ if (++mmu->serial == 0)
+ mmu->serial = 1;
+}
+
+static void
+i915_mmu_notifier_del(struct i915_mmu_notifier *mmu,
+ struct i915_mmu_object *mn)
+{
+ lockdep_assert_held(&mmu->dev->struct_mutex);
+
+ spin_lock(&mmu->lock);
+ interval_tree_remove(&mn->it, &mmu->objects);
+ __i915_mmu_notifier_update_serial(mmu);
+ spin_unlock(&mmu->lock);
+
+ /* Protected against _add() by dev->struct_mutex */
+ if (--mmu->count == 0)
+ __i915_mmu_notifier_destroy(mmu);
+}
+
+static int
+i915_mmu_notifier_add(struct i915_mmu_notifier *mmu,
+ struct i915_mmu_object *mn)
+{
+ struct interval_tree_node *it;
+ int ret;
+
+ ret = i915_mutex_lock_interruptible(mmu->dev);
+ if (ret)
+ return ret;
+
+ /* Make sure we drop the final active reference (and thereby
+ * remove the objects from the interval tree) before we do
+ * the check for overlapping objects.
+ */
+ i915_gem_retire_requests(mmu->dev);
+
+ /* Disallow overlapping userptr objects */
+ spin_lock(&mmu->lock);
+ it = interval_tree_iter_first(&mmu->objects,
+ mn->it.start, mn->it.last);
+ if (it) {
+ struct drm_i915_gem_object *obj;
+
+ /* We only need to check the first object in the range as it
+ * either has cancelled gup work queued and we need to
+ * return back to the user to give time for the gup-workers
+ * to flush their object references upon which the object will
+ * be removed from the interval-tree, or the range is
+ * still in use by another client and the overlap is invalid.
+ */
+
+ obj = container_of(it, struct i915_mmu_object, it)->obj;
+ ret = obj->userptr.workers ? -EAGAIN : -EINVAL;
+ } else {
+ interval_tree_insert(&mn->it, &mmu->objects);
+ __i915_mmu_notifier_update_serial(mmu);
+ ret = 0;
+ }
+ spin_unlock(&mmu->lock);
+ mutex_unlock(&mmu->dev->struct_mutex);
+
+ return ret;
+}
+
+static void
+i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
+{
+ struct i915_mmu_object *mn;
+
+ mn = obj->userptr.mn;
+ if (mn == NULL)
+ return;
+
+ i915_mmu_notifier_del(mn->mmu, mn);
+ obj->userptr.mn = NULL;
+}
+
+static int
+i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
+ unsigned flags)
+{
+ struct i915_mmu_notifier *mmu;
+ struct i915_mmu_object *mn;
+ int ret;
+
+ if (flags & I915_USERPTR_UNSYNCHRONIZED)
+ return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;
+
+ down_write(&obj->userptr.mm->mmap_sem);
+ ret = i915_mutex_lock_interruptible(obj->base.dev);
+ if (ret == 0) {
+ mmu = i915_mmu_notifier_get(obj->base.dev, obj->userptr.mm);
+ if (!IS_ERR(mmu))
+ mmu->count++; /* preemptive add to act as a refcount */
+ else
+ ret = PTR_ERR(mmu);
+ mutex_unlock(&obj->base.dev->struct_mutex);
+ }
+ up_write(&obj->userptr.mm->mmap_sem);
+ if (ret)
+ return ret;
+
+ mn = kzalloc(sizeof(*mn), GFP_KERNEL);
+ if (mn == NULL) {
+ ret = -ENOMEM;
+ goto destroy_mmu;
+ }
+
+ mn->mmu = mmu;
+ mn->it.start = obj->userptr.ptr;
+ mn->it.last = mn->it.start + obj->base.size - 1;
+ mn->obj = obj;
+
+ ret = i915_mmu_notifier_add(mmu, mn);
+ if (ret)
+ goto free_mn;
+
+ obj->userptr.mn = mn;
+ return 0;
+
+free_mn:
+ kfree(mn);
+destroy_mmu:
+ mutex_lock(&obj->base.dev->struct_mutex);
+ if (--mmu->count == 0)
+ __i915_mmu_notifier_destroy(mmu);
+ mutex_unlock(&obj->base.dev->struct_mutex);
+ return ret;
+}
+
+#else
+
+static void
+i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
+{
+}
+
+static int
+i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
+ unsigned flags)
+{
+ if ((flags & I915_USERPTR_UNSYNCHRONIZED) == 0)
+ return -ENODEV;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ return 0;
+}
+#endif
+
+struct get_pages_work {
+ struct work_struct work;
+ struct drm_i915_gem_object *obj;
+ struct task_struct *task;
+};
+
+
+#if IS_ENABLED(CONFIG_SWIOTLB)
+#define swiotlb_active() swiotlb_nr_tbl()
+#else
+#define swiotlb_active() 0
+#endif
+
+static int
+st_set_pages(struct sg_table **st, struct page **pvec, int num_pages)
+{
+ struct scatterlist *sg;
+ int ret, n;
+
+ *st = kmalloc(sizeof(**st), GFP_KERNEL);
+ if (*st == NULL)
+ return -ENOMEM;
+
+ if (swiotlb_active()) {
+ ret = sg_alloc_table(*st, num_pages, GFP_KERNEL);
+ if (ret)
+ goto err;
+
+ for_each_sg((*st)->sgl, sg, num_pages, n)
+ sg_set_page(sg, pvec[n], PAGE_SIZE, 0);
+ } else {
+ ret = sg_alloc_table_from_pages(*st, pvec, num_pages,
+ 0, num_pages << PAGE_SHIFT,
+ GFP_KERNEL);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ kfree(*st);
+ *st = NULL;
+ return ret;
+}
+
+static void
+__i915_gem_userptr_get_pages_worker(struct work_struct *_work)
+{
+ struct get_pages_work *work = container_of(_work, typeof(*work), work);
+ struct drm_i915_gem_object *obj = work->obj;
+ struct drm_device *dev = obj->base.dev;
+ const int num_pages = obj->base.size >> PAGE_SHIFT;
+ struct page **pvec;
+ int pinned, ret;
+
+ ret = -ENOMEM;
+ pinned = 0;
+
+ pvec = kmalloc(num_pages*sizeof(struct page *),
+ GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
+ if (pvec == NULL)
+ pvec = drm_malloc_ab(num_pages, sizeof(struct page *));
+ if (pvec != NULL) {
+ struct mm_struct *mm = obj->userptr.mm;
+
+ down_read(&mm->mmap_sem);
+ while (pinned < num_pages) {
+ ret = get_user_pages(work->task, mm,
+ obj->userptr.ptr + pinned * PAGE_SIZE,
+ num_pages - pinned,
+ !obj->userptr.read_only, 0,
+ pvec + pinned, NULL);
+ if (ret < 0)
+ break;
+
+ pinned += ret;
+ }
+ up_read(&mm->mmap_sem);
+ }
+
+ mutex_lock(&dev->struct_mutex);
+ if (obj->userptr.work != &work->work) {
+ ret = 0;
+ } else if (pinned == num_pages) {
+ ret = st_set_pages(&obj->pages, pvec, num_pages);
+ if (ret == 0) {
+ list_add_tail(&obj->global_list, &to_i915(dev)->mm.unbound_list);
+ pinned = 0;
+ }
+ }
+
+ obj->userptr.work = ERR_PTR(ret);
+ obj->userptr.workers--;
+ drm_gem_object_unreference(&obj->base);
+ mutex_unlock(&dev->struct_mutex);
+
+ release_pages(pvec, pinned, 0);
+ drm_free_large(pvec);
+
+ put_task_struct(work->task);
+ kfree(work);
+}
+
+static int
+i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
+{
+ const int num_pages = obj->base.size >> PAGE_SHIFT;
+ struct page **pvec;
+ int pinned, ret;
+
+ /* If userspace should engineer that these pages are replaced in
+ * the vma between us binding these pages into the GTT and completion
+ * of rendering... Their loss. If they change the mapping of their
+ * pages they need to create a new bo to point to the new vma.
+ *
+ * However, that still leaves open the possibility of the vma
+ * being copied upon fork, which falls under the same userspace
+ * synchronisation issue as a regular bo, except that this time
+ * the process may not be expecting that a particular piece of
+ * memory is tied to the GPU.
+ *
+ * Fortunately, we can hook into the mmu_notifier in order to
+ * discard the page references prior to anything nasty happening
+ * to the vma (discard or cloning) which should prevent the more
+ * egregious cases from causing harm.
+ */
+
+ pvec = NULL;
+ pinned = 0;
+ if (obj->userptr.mm == current->mm) {
+ pvec = kmalloc(num_pages*sizeof(struct page *),
+ GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
+ if (pvec == NULL) {
+ pvec = drm_malloc_ab(num_pages, sizeof(struct page *));
+ if (pvec == NULL)
+ return -ENOMEM;
+ }
+
+ pinned = __get_user_pages_fast(obj->userptr.ptr, num_pages,
+ !obj->userptr.read_only, pvec);
+ }
+ if (pinned < num_pages) {
+ if (pinned < 0) {
+ ret = pinned;
+ pinned = 0;
+ } else {
+ /* Spawn a worker so that we can acquire the
+ * user pages without holding our mutex. Access
+ * to the user pages requires mmap_sem, and we have
+ * a strict lock ordering of mmap_sem, struct_mutex -
+ * we already hold struct_mutex here and so cannot
+ * call gup without encountering a lock inversion.
+ *
+ * Userspace will keep on repeating the operation
+ * (thanks to EAGAIN) until either we hit the fast
+ * path or the worker completes. If the worker is
+ * cancelled or superseded, the task is still run
+ * but the results ignored. (This leads to
+ * complications that we may have a stray object
+ * refcount that we need to be wary of when
+ * checking for existing objects during creation.)
+ * If the worker encounters an error, it reports
+ * that error back to this function through
+ * obj->userptr.work = ERR_PTR.
+ */
+ ret = -EAGAIN;
+ if (obj->userptr.work == NULL &&
+ obj->userptr.workers < I915_GEM_USERPTR_MAX_WORKERS) {
+ struct get_pages_work *work;
+
+ work = kmalloc(sizeof(*work), GFP_KERNEL);
+ if (work != NULL) {
+ obj->userptr.work = &work->work;
+ obj->userptr.workers++;
+
+ work->obj = obj;
+ drm_gem_object_reference(&obj->base);
+
+ work->task = current;
+ get_task_struct(work->task);
+
+ INIT_WORK(&work->work, __i915_gem_userptr_get_pages_worker);
+ schedule_work(&work->work);
+ } else
+ ret = -ENOMEM;
+ } else {
+ if (IS_ERR(obj->userptr.work)) {
+ ret = PTR_ERR(obj->userptr.work);
+ obj->userptr.work = NULL;
+ }
+ }
+ }
+ } else {
+ ret = st_set_pages(&obj->pages, pvec, num_pages);
+ if (ret == 0) {
+ obj->userptr.work = NULL;
+ pinned = 0;
+ }
+ }
+
+ release_pages(pvec, pinned, 0);
+ drm_free_large(pvec);
+ return ret;
+}
+
+static void
+i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
+{
+ struct scatterlist *sg;
+ int i;
+
+ BUG_ON(obj->userptr.work != NULL);
+
+ if (obj->madv != I915_MADV_WILLNEED)
+ obj->dirty = 0;
+
+ for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) {
+ struct page *page = sg_page(sg);
+
+ if (obj->dirty)
+ set_page_dirty(page);
+
+ mark_page_accessed(page);
+ page_cache_release(page);
+ }
+ obj->dirty = 0;
+
+ sg_free_table(obj->pages);
+ kfree(obj->pages);
+}
+
+static void
+i915_gem_userptr_release(struct drm_i915_gem_object *obj)
+{
+ i915_gem_userptr_release__mmu_notifier(obj);
+
+ if (obj->userptr.mm) {
+ mmput(obj->userptr.mm);
+ obj->userptr.mm = NULL;
+ }
+}
+
+static int
+i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
+{
+ if (obj->userptr.mn)
+ return 0;
+
+ return i915_gem_userptr_init__mmu_notifier(obj, 0);
+}
+
+static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
+ .dmabuf_export = i915_gem_userptr_dmabuf_export,
+ .get_pages = i915_gem_userptr_get_pages,
+ .put_pages = i915_gem_userptr_put_pages,
+ .release = i915_gem_userptr_release,
+};
+
+/**
+ * Creates a new mm object that wraps some normal memory from the process
+ * context - user memory.
+ *
+ * We impose several restrictions upon the memory being mapped
+ * into the GPU.
+ * 1. It must be page aligned (both start/end addresses, i.e. ptr and size).
+ * 2. It cannot overlap any other userptr object in the same address space.
+ * 3. It must be normal system memory, not a pointer into another map of IO
+ * space (e.g. it must not be a GTT mmapping of another object).
+ * 4. We only allow a bo as large as we could in theory map into the GTT,
+ * that is, we limit the size to the total size of the GTT.
+ * 5. The bo is marked as being snoopable. The backing pages are left
+ * accessible directly by the CPU, but reads and writes by the GPU may
+ * incur the cost of a snoop (unless you have an LLC architecture).
+ *
+ * Synchronisation between multiple users and the GPU is left to userspace
+ * through the normal set-domain-ioctl. The kernel will enforce that the
+ * GPU relinquishes the VMA before it is returned back to the system
+ * i.e. upon free(), munmap() or process termination. However, the userspace
+ * malloc() library may not immediately relinquish the VMA after free() and
+ * instead reuse it whilst the GPU is still reading and writing to the VMA.
+ * Caveat emptor.
+ *
+ * Also note that the object created here is not currently a "first class"
+ * object, in that several ioctls are banned. These are the CPU access
+ * ioctls: mmap(), pwrite and pread. In practice, you are expected to use
+ * direct access via your pointer rather than use those ioctls.
+ *
+ * If you think this is a good interface to use to pass GPU memory between
+ * drivers, please use dma-buf instead. In fact, wherever possible use
+ * dma-buf instead.
+ */
+int
+i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_userptr *args = data;
+ struct drm_i915_gem_object *obj;
+ int ret;
+ u32 handle;
+
+ if (args->flags & ~(I915_USERPTR_READ_ONLY |
+ I915_USERPTR_UNSYNCHRONIZED))
+ return -EINVAL;
+
+ if (offset_in_page(args->user_ptr | args->user_size))
+ return -EINVAL;
+
+ if (args->user_size > dev_priv->gtt.base.total)
+ return -E2BIG;
+
+ if (!access_ok(args->flags & I915_USERPTR_READ_ONLY ? VERIFY_READ : VERIFY_WRITE,
+ (char __user *)(unsigned long)args->user_ptr, args->user_size))
+ return -EFAULT;
+
+ if (args->flags & I915_USERPTR_READ_ONLY) {
+ /* On almost all of the current hw, we cannot tell the GPU that a
+ * page is readonly, so this is just a placeholder in the uAPI.
+ */
+ return -ENODEV;
+ }
+
+ /* Allocate the new object */
+ obj = i915_gem_object_alloc(dev);
+ if (obj == NULL)
+ return -ENOMEM;
+
+ drm_gem_private_object_init(dev, &obj->base, args->user_size);
+ i915_gem_object_init(obj, &i915_gem_userptr_ops);
+ obj->cache_level = I915_CACHE_LLC;
+ obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+ obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+
+ obj->userptr.ptr = args->user_ptr;
+ obj->userptr.read_only = !!(args->flags & I915_USERPTR_READ_ONLY);
+
+ /* And keep a pointer to the current->mm for resolving the user pages
+ * at binding. This means that we need to hook into the mmu_notifier
+ * in order to detect if the mmu is destroyed.
+ */
+ ret = -ENOMEM;
+ if ((obj->userptr.mm = get_task_mm(current)))
+ ret = i915_gem_userptr_init__mmu_notifier(obj, args->flags);
+ if (ret == 0)
+ ret = drm_gem_handle_create(file, &obj->base, &handle);
+
+ /* drop reference from allocate - handle holds it now */
+ drm_gem_object_unreference_unlocked(&obj->base);
+ if (ret)
+ return ret;
+
+ args->handle = handle;
+ return 0;
+}
+
+int
+i915_gem_init_userptr(struct drm_device *dev)
+{
+#if defined(CONFIG_MMU_NOTIFIER)
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ hash_init(dev_priv->mmu_notifiers);
+#endif
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 2d81985..87ec60e 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -205,6 +205,7 @@
err_puts(m, tiling_flag(err->tiling));
err_puts(m, dirty_flag(err->dirty));
err_puts(m, purgeable_flag(err->purgeable));
+ err_puts(m, err->userptr ? " userptr" : "");
err_puts(m, err->ring != -1 ? " " : "");
err_puts(m, ring_str(err->ring));
err_puts(m, i915_cache_level_str(err->cache_level));
@@ -641,6 +642,7 @@
err->tiling = obj->tiling_mode;
err->dirty = obj->dirty;
err->purgeable = obj->madv != I915_MADV_WILLNEED;
+ err->userptr = obj->userptr.mm != NULL;
err->ring = obj->ring ? obj->ring->id : -1;
err->cache_level = obj->cache_level;
}
@@ -745,7 +747,7 @@
}
static void i915_record_ring_state(struct drm_device *dev,
- struct intel_ring_buffer *ring,
+ struct intel_engine_cs *ring,
struct drm_i915_error_ring *ering)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -823,8 +825,8 @@
ering->hws = I915_READ(mmio);
}
- ering->cpu_ring_head = ring->head;
- ering->cpu_ring_tail = ring->tail;
+ ering->cpu_ring_head = ring->buffer->head;
+ ering->cpu_ring_tail = ring->buffer->tail;
ering->hangcheck_score = ring->hangcheck.score;
ering->hangcheck_action = ring->hangcheck.action;
@@ -857,7 +859,7 @@
}
-static void i915_gem_record_active_context(struct intel_ring_buffer *ring,
+static void i915_gem_record_active_context(struct intel_engine_cs *ring,
struct drm_i915_error_state *error,
struct drm_i915_error_ring *ering)
{
@@ -884,7 +886,7 @@
int i, count;
for (i = 0; i < I915_NUM_RINGS; i++) {
- struct intel_ring_buffer *ring = &dev_priv->ring[i];
+ struct intel_engine_cs *ring = &dev_priv->ring[i];
if (ring->dev == NULL)
continue;
@@ -928,7 +930,7 @@
}
error->ring[i].ringbuffer =
- i915_error_ggtt_object_create(dev_priv, ring->obj);
+ i915_error_ggtt_object_create(dev_priv, ring->buffer->obj);
if (ring->status_page.obj)
error->ring[i].hws_page =
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 2d761836..cbf31cb 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -248,6 +248,46 @@
return true;
}
+/**
+ * bdw_update_pm_irq - update GT interrupt 2
+ * @dev_priv: driver private
+ * @interrupt_mask: mask of interrupt bits to update
+ * @enabled_irq_mask: mask of interrupt bits to enable
+ *
+ * Copied from the snb function, updated with relevant register offsets
+ */
+static void bdw_update_pm_irq(struct drm_i915_private *dev_priv,
+ uint32_t interrupt_mask,
+ uint32_t enabled_irq_mask)
+{
+ uint32_t new_val;
+
+ assert_spin_locked(&dev_priv->irq_lock);
+
+ if (WARN_ON(dev_priv->pm.irqs_disabled))
+ return;
+
+ new_val = dev_priv->pm_irq_mask;
+ new_val &= ~interrupt_mask;
+ new_val |= (~enabled_irq_mask & interrupt_mask);
+
+ if (new_val != dev_priv->pm_irq_mask) {
+ dev_priv->pm_irq_mask = new_val;
+ I915_WRITE(GEN8_GT_IMR(2), dev_priv->pm_irq_mask);
+ POSTING_READ(GEN8_GT_IMR(2));
+ }
+}
+
+void bdw_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+{
+ bdw_update_pm_irq(dev_priv, mask, mask);
+}
+
+void bdw_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+{
+ bdw_update_pm_irq(dev_priv, mask, 0);
+}
+
static bool cpt_can_enable_serr_int(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -266,16 +306,50 @@
return true;
}
-static void i9xx_clear_fifo_underrun(struct drm_device *dev, enum pipe pipe)
+void i9xx_check_fifo_underruns(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *crtc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
+
+ for_each_intel_crtc(dev, crtc) {
+ u32 reg = PIPESTAT(crtc->pipe);
+ u32 pipestat;
+
+ if (crtc->cpu_fifo_underrun_disabled)
+ continue;
+
+ pipestat = I915_READ(reg) & 0xffff0000;
+ if ((pipestat & PIPE_FIFO_UNDERRUN_STATUS) == 0)
+ continue;
+
+ I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
+ POSTING_READ(reg);
+
+ DRM_ERROR("pipe %c underrun\n", pipe_name(crtc->pipe));
+ }
+
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+}
+
+static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
+ enum pipe pipe, bool enable)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 reg = PIPESTAT(pipe);
- u32 pipestat = I915_READ(reg) & 0x7fff0000;
+ u32 pipestat = I915_READ(reg) & 0xffff0000;
assert_spin_locked(&dev_priv->irq_lock);
- I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
- POSTING_READ(reg);
+ if (enable) {
+ I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
+ POSTING_READ(reg);
+ } else {
+ if (pipestat & PIPE_FIFO_UNDERRUN_STATUS)
+ DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
+ }
}
static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
@@ -303,15 +377,11 @@
ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
} else {
- bool was_enabled = !(I915_READ(DEIMR) & DE_ERR_INT_IVB);
-
- /* Change the state _after_ we've read out the current one. */
ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
- if (!was_enabled &&
- (I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe))) {
- DRM_DEBUG_KMS("uncleared fifo underrun on pipe %c\n",
- pipe_name(pipe));
+ if (I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe)) {
+ DRM_ERROR("uncleared fifo underrun on pipe %c\n",
+ pipe_name(pipe));
}
}
}
@@ -387,16 +457,11 @@
ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
} else {
- uint32_t tmp = I915_READ(SERR_INT);
- bool was_enabled = !(I915_READ(SDEIMR) & SDE_ERROR_CPT);
-
- /* Change the state _after_ we've read out the current one. */
ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);
- if (!was_enabled &&
- (tmp & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder))) {
- DRM_DEBUG_KMS("uncleared pch fifo underrun on pch transcoder %c\n",
- transcoder_name(pch_transcoder));
+ if (I915_READ(SERR_INT) & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) {
+ DRM_ERROR("uncleared pch fifo underrun on pch transcoder %c\n",
+ transcoder_name(pch_transcoder));
}
}
}
@@ -415,8 +480,8 @@
*
* Returns the previous state of underrun reporting.
*/
-bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
- enum pipe pipe, bool enable)
+static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
+ enum pipe pipe, bool enable)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
@@ -432,8 +497,8 @@
intel_crtc->cpu_fifo_underrun_disabled = !enable;
- if (enable && (INTEL_INFO(dev)->gen < 5 || IS_VALLEYVIEW(dev)))
- i9xx_clear_fifo_underrun(dev, pipe);
+ if (INTEL_INFO(dev)->gen < 5 || IS_VALLEYVIEW(dev))
+ i9xx_set_fifo_underrun_reporting(dev, pipe, enable);
else if (IS_GEN5(dev) || IS_GEN6(dev))
ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
else if (IS_GEN7(dev))
@@ -578,11 +643,17 @@
u32 enable_mask = status_mask << 16;
/*
- * On pipe A we don't support the PSR interrupt yet, on pipe B the
- * same bit MBZ.
+ * On pipe A we don't support the PSR interrupt yet,
+ * on pipes B and C the same bit MBZ.
*/
if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
return 0;
+ /*
+ * On pipes B and C we don't support the PSR interrupt yet, on pipe
+ * A the same bit is for perf counters which we don't use either.
+ */
+ if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
+ return 0;
enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
SPRITE0_FLIP_DONE_INT_EN_VLV |
@@ -669,6 +740,56 @@
}
}
+/*
+ * This timing diagram depicts the video signal in and
+ * around the vertical blanking period.
+ *
+ * Assumptions about the fictitious mode used in this example:
+ * vblank_start >= 3
+ * vsync_start = vblank_start + 1
+ * vsync_end = vblank_start + 2
+ * vtotal = vblank_start + 3
+ *
+ * start of vblank:
+ * latch double buffered registers
+ * increment frame counter (ctg+)
+ * generate start of vblank interrupt (gen4+)
+ * |
+ * | frame start:
+ * | generate frame start interrupt (aka. vblank interrupt) (gmch)
+ * | may be shifted forward 1-3 extra lines via PIPECONF
+ * | |
+ * | | start of vsync:
+ * | | generate vsync interrupt
+ * | | |
+ * ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx
+ * . \hs/ . \hs/ \hs/ \hs/ . \hs/
+ * ----va---> <-----------------vb--------------------> <--------va-------------
+ * | | <----vs-----> |
+ * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
+ * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
+ * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
+ * | | |
+ * last visible pixel first visible pixel
+ * | increment frame counter (gen3/4)
+ * pixel counter = vblank_start * htotal pixel counter = 0 (gen3/4)
+ *
+ * x = horizontal active
+ * _ = horizontal blanking
+ * hs = horizontal sync
+ * va = vertical active
+ * vb = vertical blanking
+ * vs = vertical sync
+ * vbs = vblank_start (number)
+ *
+ * Summary:
+ * - most events happen at the start of horizontal sync
+ * - frame start happens at the start of horizontal blank, 1-4 lines
+ * (depending on PIPECONF settings) after the start of vblank
+ * - gen3/4 pixel and frame counter are synchronized with the start
+ * of horizontal active on the first line of vertical active
+ */
+
static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
{
/* Gen2 doesn't have a hardware frame counter */
@@ -683,7 +804,7 @@
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long high_frame;
unsigned long low_frame;
- u32 high1, high2, low, pixel, vbl_start;
+ u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
if (!i915_pipe_enabled(dev, pipe)) {
DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
@@ -697,17 +818,28 @@
const struct drm_display_mode *mode =
&intel_crtc->config.adjusted_mode;
- vbl_start = mode->crtc_vblank_start * mode->crtc_htotal;
+ htotal = mode->crtc_htotal;
+ hsync_start = mode->crtc_hsync_start;
+ vbl_start = mode->crtc_vblank_start;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ vbl_start = DIV_ROUND_UP(vbl_start, 2);
} else {
enum transcoder cpu_transcoder = (enum transcoder) pipe;
- u32 htotal;
htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
+ hsync_start = (I915_READ(HSYNC(cpu_transcoder)) & 0x1fff) + 1;
vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1;
-
- vbl_start *= htotal;
+ if ((I915_READ(PIPECONF(cpu_transcoder)) &
+ PIPECONF_INTERLACE_MASK) != PIPECONF_PROGRESSIVE)
+ vbl_start = DIV_ROUND_UP(vbl_start, 2);
}
+ /* Convert to pixel count */
+ vbl_start *= htotal;
+
+ /* Start of vblank event occurs at start of hsync */
+ vbl_start -= htotal - hsync_start;
+
high_frame = PIPEFRAME(pipe);
low_frame = PIPEFRAMEPIXEL(pipe);
@@ -757,9 +889,9 @@
struct drm_i915_private *dev_priv = dev->dev_private;
const struct drm_display_mode *mode = &crtc->config.adjusted_mode;
enum pipe pipe = crtc->pipe;
- int vtotal = mode->crtc_vtotal;
- int position;
+ int position, vtotal;
+ vtotal = mode->crtc_vtotal;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
vtotal /= 2;
@@ -769,14 +901,10 @@
position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
/*
- * Scanline counter increments at leading edge of hsync, and
- * it starts counting from vtotal-1 on the first active line.
- * That means the scanline counter value is always one less
- * than what we would expect. Ie. just after start of vblank,
- * which also occurs at start of hsync (on the last active line),
- * the scanline counter will read vblank_start-1.
+ * See update_scanline_offset() for the details on the
+ * scanline_offset adjustment.
*/
- return (position + 1) % vtotal;
+ return (position + crtc->scanline_offset) % vtotal;
}
static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
@@ -844,6 +972,18 @@
vtotal *= htotal;
/*
+ * In interlaced modes, the pixel counter counts all pixels,
+ * so one field will have htotal more pixels. In order to avoid
+ * the reported position from jumping backwards when the pixel
+ * counter is beyond the length of the shorter field, just
+ * clamp the position to the length of the shorter field. This
+ * matches how the scanline counter based position works since
+ * the scanline counter doesn't count the two half lines.
+ */
+ if (position >= vtotal)
+ position = vtotal - 1;
+
+ /*
* Start of vblank interrupt is triggered at start of hsync,
* just prior to the first active line of vblank. However we
* consider lines to start at the leading edge of horizontal
@@ -949,7 +1089,7 @@
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
connector->base.id,
- drm_get_connector_name(connector),
+ connector->name,
drm_get_connector_status_name(old_status),
drm_get_connector_status_name(connector->status));
@@ -994,7 +1134,7 @@
connector->polled == DRM_CONNECTOR_POLL_HPD) {
DRM_INFO("HPD interrupt storm detected on connector %s: "
"switching from hotplug detection to polling\n",
- drm_get_connector_name(connector));
+ connector->name);
dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
connector->polled = DRM_CONNECTOR_POLL_CONNECT
| DRM_CONNECTOR_POLL_DISCONNECT;
@@ -1002,7 +1142,7 @@
}
if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
- drm_get_connector_name(connector), intel_encoder->hpd_pin);
+ connector->name, intel_encoder->hpd_pin);
}
}
/* if there were no outputs to poll, poll was disabled,
@@ -1077,9 +1217,9 @@
}
static void notify_ring(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+ struct intel_engine_cs *ring)
{
- if (ring->obj == NULL)
+ if (!intel_ring_initialized(ring))
return;
trace_i915_gem_request_complete(ring);
@@ -1098,8 +1238,12 @@
spin_lock_irq(&dev_priv->irq_lock);
pm_iir = dev_priv->rps.pm_iir;
dev_priv->rps.pm_iir = 0;
- /* Make sure not to corrupt PMIMR state used by ringbuffer code */
- snb_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+ if (IS_BROADWELL(dev_priv->dev))
+ bdw_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+ else {
+ /* Make sure not to corrupt PMIMR state used by ringbuffer */
+ snb_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+ }
spin_unlock_irq(&dev_priv->irq_lock);
/* Make sure we didn't queue anything we're not going to process. */
@@ -1296,6 +1440,19 @@
ivybridge_parity_error_irq_handler(dev, gt_iir);
}
+static void gen8_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
+{
+ if ((pm_iir & dev_priv->pm_rps_events) == 0)
+ return;
+
+ spin_lock(&dev_priv->irq_lock);
+ dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
+ bdw_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
+ spin_unlock(&dev_priv->irq_lock);
+
+ queue_work(dev_priv->wq, &dev_priv->rps.work);
+}
+
static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
struct drm_i915_private *dev_priv,
u32 master_ctl)
@@ -1334,6 +1491,17 @@
DRM_ERROR("The master control interrupt lied (GT1)!\n");
}
+ if (master_ctl & GEN8_GT_PM_IRQ) {
+ tmp = I915_READ(GEN8_GT_IIR(2));
+ if (tmp & dev_priv->pm_rps_events) {
+ ret = IRQ_HANDLED;
+ gen8_rps_irq_handler(dev_priv, tmp);
+ I915_WRITE(GEN8_GT_IIR(2),
+ tmp & dev_priv->pm_rps_events);
+ } else
+ DRM_ERROR("The master control interrupt lied (PM)!\n");
+ }
+
if (master_ctl & GEN8_GT_VECS_IRQ) {
tmp = I915_READ(GEN8_GT_IIR(3));
if (tmp) {
@@ -1598,6 +1766,9 @@
case PIPE_B:
iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
break;
+ case PIPE_C:
+ iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
+ break;
}
if (iir & iir_bit)
mask |= dev_priv->pipestat_irq_mask[pipe];
@@ -1668,7 +1839,7 @@
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
- struct drm_device *dev = (struct drm_device *) arg;
+ struct drm_device *dev = arg;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 iir, gt_iir, pm_iir;
irqreturn_t ret = IRQ_NONE;
@@ -1703,6 +1874,40 @@
return ret;
}
+static irqreturn_t cherryview_irq_handler(int irq, void *arg)
+{
+ struct drm_device *dev = arg;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 master_ctl, iir;
+ irqreturn_t ret = IRQ_NONE;
+
+ for (;;) {
+ master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
+ iir = I915_READ(VLV_IIR);
+
+ if (master_ctl == 0 && iir == 0)
+ break;
+
+ I915_WRITE(GEN8_MASTER_IRQ, 0);
+
+ gen8_gt_irq_handler(dev, dev_priv, master_ctl);
+
+ valleyview_pipestat_irq_handler(dev, iir);
+
+ /* Consume port. Then clear IIR or we'll miss events */
+ i9xx_hpd_irq_handler(dev);
+
+ I915_WRITE(VLV_IIR, iir);
+
+ I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
+ POSTING_READ(GEN8_MASTER_IRQ);
+
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
+}
+
static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1935,7 +2140,7 @@
static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
- struct drm_device *dev = (struct drm_device *) arg;
+ struct drm_device *dev = arg;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 de_iir, gt_iir, de_ier, sde_ier = 0;
irqreturn_t ret = IRQ_NONE;
@@ -2111,7 +2316,7 @@
static void i915_error_wake_up(struct drm_i915_private *dev_priv,
bool reset_completed)
{
- struct intel_ring_buffer *ring;
+ struct intel_engine_cs *ring;
int i;
/*
@@ -2544,14 +2749,14 @@
}
static u32
-ring_last_seqno(struct intel_ring_buffer *ring)
+ring_last_seqno(struct intel_engine_cs *ring)
{
return list_entry(ring->request_list.prev,
struct drm_i915_gem_request, list)->seqno;
}
static bool
-ring_idle(struct intel_ring_buffer *ring, u32 seqno)
+ring_idle(struct intel_engine_cs *ring, u32 seqno)
{
return (list_empty(&ring->request_list) ||
i915_seqno_passed(seqno, ring_last_seqno(ring)));
@@ -2574,11 +2779,11 @@
}
}
-static struct intel_ring_buffer *
-semaphore_wait_to_signaller_ring(struct intel_ring_buffer *ring, u32 ipehr)
+static struct intel_engine_cs *
+semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
- struct intel_ring_buffer *signaller;
+ struct intel_engine_cs *signaller;
int i;
if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
@@ -2606,8 +2811,8 @@
return NULL;
}
-static struct intel_ring_buffer *
-semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno)
+static struct intel_engine_cs *
+semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
u32 cmd, ipehr, head;
@@ -2632,10 +2837,10 @@
* our ring is smaller than what the hardware (and hence
* HEAD_ADDR) allows. Also handles wrap-around.
*/
- head &= ring->size - 1;
+ head &= ring->buffer->size - 1;
/* This here seems to blow up */
- cmd = ioread32(ring->virtual_start + head);
+ cmd = ioread32(ring->buffer->virtual_start + head);
if (cmd == ipehr)
break;
@@ -2645,14 +2850,14 @@
if (!i)
return NULL;
- *seqno = ioread32(ring->virtual_start + head + 4) + 1;
+ *seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
return semaphore_wait_to_signaller_ring(ring, ipehr);
}
-static int semaphore_passed(struct intel_ring_buffer *ring)
+static int semaphore_passed(struct intel_engine_cs *ring)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
- struct intel_ring_buffer *signaller;
+ struct intel_engine_cs *signaller;
u32 seqno, ctl;
ring->hangcheck.deadlock = true;
@@ -2671,7 +2876,7 @@
static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
{
- struct intel_ring_buffer *ring;
+ struct intel_engine_cs *ring;
int i;
for_each_ring(ring, dev_priv, i)
@@ -2679,7 +2884,7 @@
}
static enum intel_ring_hangcheck_action
-ring_stuck(struct intel_ring_buffer *ring, u64 acthd)
+ring_stuck(struct intel_engine_cs *ring, u64 acthd)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2735,7 +2940,7 @@
{
struct drm_device *dev = (struct drm_device *)data;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
+ struct intel_engine_cs *ring;
int i;
int busy_count = 0, rings_hung = 0;
bool stuck[I915_NUM_RINGS] = { 0 };
@@ -2974,6 +3179,37 @@
gen8_irq_reset(dev);
}
+static void cherryview_irq_preinstall(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe;
+
+ I915_WRITE(GEN8_MASTER_IRQ, 0);
+ POSTING_READ(GEN8_MASTER_IRQ);
+
+ GEN8_IRQ_RESET_NDX(GT, 0);
+ GEN8_IRQ_RESET_NDX(GT, 1);
+ GEN8_IRQ_RESET_NDX(GT, 2);
+ GEN8_IRQ_RESET_NDX(GT, 3);
+
+ GEN5_IRQ_RESET(GEN8_PCU_);
+
+ POSTING_READ(GEN8_PCU_IIR);
+
+ I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
+
+ I915_WRITE(PORT_HOTPLUG_EN, 0);
+ I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+
+ for_each_pipe(pipe)
+ I915_WRITE(PIPESTAT(pipe), 0xffff);
+
+ I915_WRITE(VLV_IMR, 0xffffffff);
+ I915_WRITE(VLV_IER, 0x0);
+ I915_WRITE(VLV_IIR, 0xffffffff);
+ POSTING_READ(VLV_IIR);
+}
+
static void ibx_hpd_irq_setup(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3252,6 +3488,8 @@
for (i = 0; i < ARRAY_SIZE(gt_interrupts); i++)
GEN8_IRQ_INIT_NDX(GT, i, ~gt_interrupts[i], gt_interrupts[i]);
+
+ dev_priv->pm_irq_mask = 0xffffffff;
}
static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
@@ -3291,6 +3529,45 @@
return 0;
}
+static int cherryview_irq_postinstall(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 enable_mask = I915_DISPLAY_PORT_INTERRUPT |
+ I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+ I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
+ u32 pipestat_enable = PLANE_FLIP_DONE_INT_STATUS_VLV |
+ PIPE_CRC_DONE_INTERRUPT_STATUS;
+ unsigned long irqflags;
+ int pipe;
+
+ /*
+ * Leave vblank interrupts masked initially. enable/disable will
+ * toggle them based on usage.
+ */
+ dev_priv->irq_mask = ~enable_mask;
+
+ for_each_pipe(pipe)
+ I915_WRITE(PIPESTAT(pipe), 0xffff);
+
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
+ for_each_pipe(pipe)
+ i915_enable_pipestat(dev_priv, pipe, pipestat_enable);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
+ I915_WRITE(VLV_IIR, 0xffffffff);
+ I915_WRITE(VLV_IMR, dev_priv->irq_mask);
+ I915_WRITE(VLV_IER, enable_mask);
+
+ gen8_gt_irq_postinstall(dev_priv);
+
+ I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE);
+ POSTING_READ(GEN8_MASTER_IRQ);
+
+ return 0;
+}
+
static void gen8_irq_uninstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3336,6 +3613,57 @@
POSTING_READ(VLV_IER);
}
+static void cherryview_irq_uninstall(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe;
+
+ if (!dev_priv)
+ return;
+
+ I915_WRITE(GEN8_MASTER_IRQ, 0);
+ POSTING_READ(GEN8_MASTER_IRQ);
+
+#define GEN8_IRQ_FINI_NDX(type, which) \
+do { \
+ I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
+ I915_WRITE(GEN8_##type##_IER(which), 0); \
+ I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
+ POSTING_READ(GEN8_##type##_IIR(which)); \
+ I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
+} while (0)
+
+#define GEN8_IRQ_FINI(type) \
+do { \
+ I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \
+ I915_WRITE(GEN8_##type##_IER, 0); \
+ I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
+ POSTING_READ(GEN8_##type##_IIR); \
+ I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
+} while (0)
+
+ GEN8_IRQ_FINI_NDX(GT, 0);
+ GEN8_IRQ_FINI_NDX(GT, 1);
+ GEN8_IRQ_FINI_NDX(GT, 2);
+ GEN8_IRQ_FINI_NDX(GT, 3);
+
+ GEN8_IRQ_FINI(PCU);
+
+#undef GEN8_IRQ_FINI
+#undef GEN8_IRQ_FINI_NDX
+
+ I915_WRITE(PORT_HOTPLUG_EN, 0);
+ I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+
+ for_each_pipe(pipe)
+ I915_WRITE(PIPESTAT(pipe), 0xffff);
+
+ I915_WRITE(VLV_IMR, 0xffffffff);
+ I915_WRITE(VLV_IER, 0x0);
+ I915_WRITE(VLV_IIR, 0xffffffff);
+ POSTING_READ(VLV_IIR);
+}
+
static void ironlake_irq_uninstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3427,7 +3755,7 @@
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
- struct drm_device *dev = (struct drm_device *) arg;
+ struct drm_device *dev = arg;
struct drm_i915_private *dev_priv = dev->dev_private;
u16 iir, new_iir;
u32 pipe_stats[2];
@@ -3612,7 +3940,7 @@
static irqreturn_t i915_irq_handler(int irq, void *arg)
{
- struct drm_device *dev = (struct drm_device *) arg;
+ struct drm_device *dev = arg;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
unsigned long irqflags;
@@ -3842,7 +4170,7 @@
static irqreturn_t i965_irq_handler(int irq, void *arg)
{
- struct drm_device *dev = (struct drm_device *) arg;
+ struct drm_device *dev = arg;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 iir, new_iir;
u32 pipe_stats[I915_MAX_PIPES];
@@ -3993,7 +4321,7 @@
if (intel_connector->encoder->hpd_pin == i) {
if (connector->polled != intel_connector->polled)
DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
- drm_get_connector_name(connector));
+ connector->name);
connector->polled = intel_connector->polled;
if (!connector->polled)
connector->polled = DRM_CONNECTOR_POLL_HPD;
@@ -4041,7 +4369,15 @@
dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
}
- if (IS_VALLEYVIEW(dev)) {
+ if (IS_CHERRYVIEW(dev)) {
+ dev->driver->irq_handler = cherryview_irq_handler;
+ dev->driver->irq_preinstall = cherryview_irq_preinstall;
+ dev->driver->irq_postinstall = cherryview_irq_postinstall;
+ dev->driver->irq_uninstall = cherryview_irq_uninstall;
+ dev->driver->enable_vblank = valleyview_enable_vblank;
+ dev->driver->disable_vblank = valleyview_disable_vblank;
+ dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
+ } else if (IS_VALLEYVIEW(dev)) {
dev->driver->irq_handler = valleyview_irq_handler;
dev->driver->irq_preinstall = valleyview_irq_preinstall;
dev->driver->irq_postinstall = valleyview_irq_postinstall;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 0eff337..5122254 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -29,6 +29,8 @@
#define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a)))
#define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
+#define _PIPE3(pipe, a, b, c) (pipe < 2 ? _PIPE(pipe, a, b) : c)
+#define _PORT3(port, a, b, c) (port < 2 ? _PORT(port, a, b) : c)
#define _MASKED_BIT_ENABLE(a) (((a) << 16) | (a))
#define _MASKED_BIT_DISABLE(a) ((a) << 16)
@@ -77,13 +79,19 @@
/* Graphics reset regs */
#define I965_GDRST 0xc0 /* PCI config register */
-#define ILK_GDSR 0x2ca4 /* MCHBAR offset */
#define GRDOM_FULL (0<<2)
#define GRDOM_RENDER (1<<2)
#define GRDOM_MEDIA (3<<2)
#define GRDOM_MASK (3<<2)
#define GRDOM_RESET_ENABLE (1<<0)
+#define ILK_GDSR 0x2ca4 /* MCHBAR offset */
+#define ILK_GRDOM_FULL (0<<1)
+#define ILK_GRDOM_RENDER (1<<1)
+#define ILK_GRDOM_MEDIA (3<<1)
+#define ILK_GRDOM_MASK (3<<1)
+#define ILK_GRDOM_RESET_ENABLE (1<<0)
+
#define GEN6_MBCUNIT_SNPCR 0x900c /* for LLC config */
#define GEN6_MBC_SNPCR_SHIFT 21
#define GEN6_MBC_SNPCR_MASK (3<<21)
@@ -92,6 +100,9 @@
#define GEN6_MBC_SNPCR_LOW (2<<21)
#define GEN6_MBC_SNPCR_MIN (3<<21) /* only 1/16th of the cache is shared */
+#define VLV_G3DCTL 0x9024
+#define VLV_GSCKGCTL 0x9028
+
#define GEN6_MBCTL 0x0907c
#define GEN6_MBCTL_ENABLE_BOOT_FETCH (1 << 4)
#define GEN6_MBCTL_CTX_FETCH_NEEDED (1 << 3)
@@ -467,6 +478,7 @@
#define IOSF_PORT_PUNIT 0x4
#define IOSF_PORT_NC 0x11
#define IOSF_PORT_DPIO 0x12
+#define IOSF_PORT_DPIO_2 0x1a
#define IOSF_PORT_GPIO_NC 0x13
#define IOSF_PORT_CCK 0x14
#define IOSF_PORT_CCU 0xA9
@@ -478,9 +490,6 @@
/* See configdb bunit SB addr map */
#define BUNIT_REG_BISOC 0x11
-#define PUNIT_OPCODE_REG_READ 6
-#define PUNIT_OPCODE_REG_WRITE 7
-
#define PUNIT_REG_DSPFREQ 0x36
#define DSPFREQSTAT_SHIFT 30
#define DSPFREQSTAT_MASK (0x3 << DSPFREQSTAT_SHIFT)
@@ -566,16 +575,91 @@
#define DSI_PLL_M1_DIV_MASK (0x1ff << 0)
#define CCK_DISPLAY_CLOCK_CONTROL 0x6b
+/**
+ * DOC: DPIO
+ *
+ * VLV and CHV have slightly peculiar display PHYs for driving DP/HDMI
+ * ports. DPIO is the name given to such a display PHY. These PHYs
+ * don't follow the standard programming model using direct MMIO
+ * registers, and instead their registers must be accessed through IOSF
+ * sideband. VLV has one such PHY for driving ports B and C, and CHV
+ * adds another PHY for driving port D. Each PHY responds to a specific
+ * IOSF-SB port.
+ *
+ * Each display PHY is made up of one or two channels. Each channel
+ * houses a common lane part which contains the PLL and other common
+ * logic. CH0 common lane also contains the IOSF-SB logic for the
+ * Common Register Interface (CRI), i.e. the DPIO registers. CRI clock
+ * must be running when any DPIO registers are accessed.
+ *
+ * In addition to having their own registers, the PHYs are also
+ * controlled through some dedicated signals from the display
+ * controller. These include PLL reference clock enable, PLL enable,
+ * and CRI clock selection, for example.
+ *
+ * Each channel also has two splines (also called data lanes), and
+ * each spline is made up of one Physical Access Coding Sub-Layer
+ * (PCS) block and two TX lanes. So each channel has two PCS blocks
+ * and four TX lanes. The TX lanes are used as DP lanes or TMDS
+ * data/clock pairs depending on the output type.
+ *
+ * Additionally the PHY also contains an AUX lane with AUX blocks
+ * for each channel. This is used for DP AUX communication, but
+ * this fact isn't really relevant for the driver since AUX is
+ * controlled from the display controller side. No DPIO registers
+ * need to be accessed during AUX communication.
+ *
+ * Generally the common lane corresponds to the pipe and
+ * the spline (PCS/TX) corresponds to the port.
+ *
+ * For dual channel PHY (VLV/CHV):
+ *
+ * pipe A == CMN/PLL/REF CH0
+ *
+ * pipe B == CMN/PLL/REF CH1
+ *
+ * port B == PCS/TX CH0
+ *
+ * port C == PCS/TX CH1
+ *
+ * This is especially important when we cross the streams
+ * i.e. drive port B with pipe B, or port C with pipe A.
+ *
+ * For single channel PHY (CHV):
+ *
+ * pipe C == CMN/PLL/REF CH0
+ *
+ * port D == PCS/TX CH0
+ *
+ * Note: digital port B is DDI0, digital port C is DDI1,
+ * digital port D is DDI2
+ */
/*
- * DPIO - a special bus for various display related registers to hide behind
+ * Dual channel PHY (VLV/CHV)
+ * ---------------------------------
+ * | CH0 | CH1 |
+ * | CMN/PLL/REF | CMN/PLL/REF |
+ * |---------------|---------------| Display PHY
+ * | PCS01 | PCS23 | PCS01 | PCS23 |
+ * |-------|-------|-------|-------|
+ * |TX0|TX1|TX2|TX3|TX0|TX1|TX2|TX3|
+ * ---------------------------------
+ * | DDI0 | DDI1 | DP/HDMI ports
+ * ---------------------------------
*
- * DPIO is VLV only.
- *
- * Note: digital port B is DDI0, digital pot C is DDI1
+ * Single channel PHY (CHV)
+ * -----------------
+ * | CH0 |
+ * | CMN/PLL/REF |
+ * |---------------| Display PHY
+ * | PCS01 | PCS23 |
+ * |-------|-------|
+ * |TX0|TX1|TX2|TX3|
+ * -----------------
+ * | DDI2 | DP/HDMI port
+ * -----------------
*/
#define DPIO_DEVFN 0
-#define DPIO_OPCODE_REG_WRITE 1
-#define DPIO_OPCODE_REG_READ 0
#define DPIO_CTL (VLV_DISPLAY_BASE + 0x2110)
#define DPIO_MODSEL1 (1<<3) /* if ref clk b == 27 */
@@ -652,14 +736,29 @@
#define DPIO_PCS_TX_LANE1_RESET (1<<7)
#define VLV_PCS_DW0(ch) _PORT(ch, _VLV_PCS_DW0_CH0, _VLV_PCS_DW0_CH1)
+#define _VLV_PCS01_DW0_CH0 0x200
+#define _VLV_PCS23_DW0_CH0 0x400
+#define _VLV_PCS01_DW0_CH1 0x2600
+#define _VLV_PCS23_DW0_CH1 0x2800
+#define VLV_PCS01_DW0(ch) _PORT(ch, _VLV_PCS01_DW0_CH0, _VLV_PCS01_DW0_CH1)
+#define VLV_PCS23_DW0(ch) _PORT(ch, _VLV_PCS23_DW0_CH0, _VLV_PCS23_DW0_CH1)
+
#define _VLV_PCS_DW1_CH0 0x8204
#define _VLV_PCS_DW1_CH1 0x8404
+#define CHV_PCS_REQ_SOFTRESET_EN (1<<23)
#define DPIO_PCS_CLK_CRI_RXEB_EIOS_EN (1<<22)
#define DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN (1<<21)
#define DPIO_PCS_CLK_DATAWIDTH_SHIFT (6)
#define DPIO_PCS_CLK_SOFT_RESET (1<<5)
#define VLV_PCS_DW1(ch) _PORT(ch, _VLV_PCS_DW1_CH0, _VLV_PCS_DW1_CH1)
+#define _VLV_PCS01_DW1_CH0 0x204
+#define _VLV_PCS23_DW1_CH0 0x404
+#define _VLV_PCS01_DW1_CH1 0x2604
+#define _VLV_PCS23_DW1_CH1 0x2804
+#define VLV_PCS01_DW1(ch) _PORT(ch, _VLV_PCS01_DW1_CH0, _VLV_PCS01_DW1_CH1)
+#define VLV_PCS23_DW1(ch) _PORT(ch, _VLV_PCS23_DW1_CH0, _VLV_PCS23_DW1_CH1)
+
#define _VLV_PCS_DW8_CH0 0x8220
#define _VLV_PCS_DW8_CH1 0x8420
#define VLV_PCS_DW8(ch) _PORT(ch, _VLV_PCS_DW8_CH0, _VLV_PCS_DW8_CH1)
@@ -675,6 +774,19 @@
#define _VLV_PCS_DW9_CH1 0x8424
#define VLV_PCS_DW9(ch) _PORT(ch, _VLV_PCS_DW9_CH0, _VLV_PCS_DW9_CH1)
+#define _CHV_PCS_DW10_CH0 0x8228
+#define _CHV_PCS_DW10_CH1 0x8428
+#define DPIO_PCS_SWING_CALC_TX0_TX2 (1<<30)
+#define DPIO_PCS_SWING_CALC_TX1_TX3 (1<<31)
+#define CHV_PCS_DW10(ch) _PORT(ch, _CHV_PCS_DW10_CH0, _CHV_PCS_DW10_CH1)
+
+#define _VLV_PCS01_DW10_CH0 0x0228
+#define _VLV_PCS23_DW10_CH0 0x0428
+#define _VLV_PCS01_DW10_CH1 0x2628
+#define _VLV_PCS23_DW10_CH1 0x2828
+#define VLV_PCS01_DW10(port) _PORT(port, _VLV_PCS01_DW10_CH0, _VLV_PCS01_DW10_CH1)
+#define VLV_PCS23_DW10(port) _PORT(port, _VLV_PCS23_DW10_CH0, _VLV_PCS23_DW10_CH1)
+
#define _VLV_PCS_DW11_CH0 0x822c
#define _VLV_PCS_DW11_CH1 0x842c
#define VLV_PCS_DW11(ch) _PORT(ch, _VLV_PCS_DW11_CH0, _VLV_PCS_DW11_CH1)
@@ -693,14 +805,21 @@
#define _VLV_TX_DW2_CH0 0x8288
#define _VLV_TX_DW2_CH1 0x8488
+#define DPIO_SWING_MARGIN_SHIFT 16
+#define DPIO_SWING_MARGIN_MASK (0xff << DPIO_SWING_MARGIN_SHIFT)
+#define DPIO_UNIQ_TRANS_SCALE_SHIFT 8
#define VLV_TX_DW2(ch) _PORT(ch, _VLV_TX_DW2_CH0, _VLV_TX_DW2_CH1)
#define _VLV_TX_DW3_CH0 0x828c
#define _VLV_TX_DW3_CH1 0x848c
+/* The following bit is for the CHV PHY */
+#define DPIO_TX_UNIQ_TRANS_SCALE_EN (1<<27)
#define VLV_TX_DW3(ch) _PORT(ch, _VLV_TX_DW3_CH0, _VLV_TX_DW3_CH1)
#define _VLV_TX_DW4_CH0 0x8290
#define _VLV_TX_DW4_CH1 0x8490
+#define DPIO_SWING_DEEMPH9P5_SHIFT 24
+#define DPIO_SWING_DEEMPH9P5_MASK (0xff << DPIO_SWING_DEEMPH9P5_SHIFT)
#define VLV_TX_DW4(ch) _PORT(ch, _VLV_TX_DW4_CH0, _VLV_TX_DW4_CH1)
#define _VLV_TX3_DW4_CH0 0x690
@@ -720,6 +839,73 @@
#define _VLV_TX_DW14_CH1 0x84b8
#define VLV_TX_DW14(ch) _PORT(ch, _VLV_TX_DW14_CH0, _VLV_TX_DW14_CH1)
+/* CHV dpPhy registers */
+#define _CHV_PLL_DW0_CH0 0x8000
+#define _CHV_PLL_DW0_CH1 0x8180
+#define CHV_PLL_DW0(ch) _PIPE(ch, _CHV_PLL_DW0_CH0, _CHV_PLL_DW0_CH1)
+
+#define _CHV_PLL_DW1_CH0 0x8004
+#define _CHV_PLL_DW1_CH1 0x8184
+#define DPIO_CHV_N_DIV_SHIFT 8
+#define DPIO_CHV_M1_DIV_BY_2 (0 << 0)
+#define CHV_PLL_DW1(ch) _PIPE(ch, _CHV_PLL_DW1_CH0, _CHV_PLL_DW1_CH1)
+
+#define _CHV_PLL_DW2_CH0 0x8008
+#define _CHV_PLL_DW2_CH1 0x8188
+#define CHV_PLL_DW2(ch) _PIPE(ch, _CHV_PLL_DW2_CH0, _CHV_PLL_DW2_CH1)
+
+#define _CHV_PLL_DW3_CH0 0x800c
+#define _CHV_PLL_DW3_CH1 0x818c
+#define DPIO_CHV_FRAC_DIV_EN (1 << 16)
+#define DPIO_CHV_FIRST_MOD (0 << 8)
+#define DPIO_CHV_SECOND_MOD (1 << 8)
+#define DPIO_CHV_FEEDFWD_GAIN_SHIFT 0
+#define CHV_PLL_DW3(ch) _PIPE(ch, _CHV_PLL_DW3_CH0, _CHV_PLL_DW3_CH1)
+
+#define _CHV_PLL_DW6_CH0 0x8018
+#define _CHV_PLL_DW6_CH1 0x8198
+#define DPIO_CHV_GAIN_CTRL_SHIFT 16
+#define DPIO_CHV_INT_COEFF_SHIFT 8
+#define DPIO_CHV_PROP_COEFF_SHIFT 0
+#define CHV_PLL_DW6(ch) _PIPE(ch, _CHV_PLL_DW6_CH0, _CHV_PLL_DW6_CH1)
+
+#define _CHV_CMN_DW13_CH0 0x8134
+#define _CHV_CMN_DW0_CH1 0x8080
+#define DPIO_CHV_S1_DIV_SHIFT 21
+#define DPIO_CHV_P1_DIV_SHIFT 13 /* 3 bits */
+#define DPIO_CHV_P2_DIV_SHIFT 8 /* 5 bits */
+#define DPIO_CHV_K_DIV_SHIFT 4
+#define DPIO_PLL_FREQLOCK (1 << 1)
+#define DPIO_PLL_LOCK (1 << 0)
+#define CHV_CMN_DW13(ch) _PIPE(ch, _CHV_CMN_DW13_CH0, _CHV_CMN_DW0_CH1)
+
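The CH0/CH1 register numbers are deliberately asymmetric here (DW13 on channel 0 pairs with DW0 on channel 1). Assuming _PIPE(pipe, a, b) expands to ((a) + (pipe)*((b)-(a))) like _PORT, the arithmetic still lands on the right offsets:

	CHV_CMN_DW13(0)   /* 0x8134 + 0*(0x8080 - 0x8134) == 0x8134 */
	CHV_CMN_DW13(1)   /* 0x8134 + 1*(0x8080 - 0x8134) == 0x8080 */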
+#define _CHV_CMN_DW14_CH0 0x8138
+#define _CHV_CMN_DW1_CH1 0x8084
+#define DPIO_AFC_RECAL (1 << 14)
+#define DPIO_DCLKP_EN (1 << 13)
+#define CHV_CMN_DW14(ch) _PIPE(ch, _CHV_CMN_DW14_CH0, _CHV_CMN_DW1_CH1)
+
+#define CHV_CMN_DW30 0x8178
+#define DPIO_LRC_BYPASS (1 << 3)
+
+#define _TXLANE(ch, lane, offset) ((ch ? 0x2400 : 0) + \
+ (lane) * 0x200 + (offset))
+
+#define CHV_TX_DW0(ch, lane) _TXLANE(ch, lane, 0x80)
+#define CHV_TX_DW1(ch, lane) _TXLANE(ch, lane, 0x84)
+#define CHV_TX_DW2(ch, lane) _TXLANE(ch, lane, 0x88)
+#define CHV_TX_DW3(ch, lane) _TXLANE(ch, lane, 0x8c)
+#define CHV_TX_DW4(ch, lane) _TXLANE(ch, lane, 0x90)
+#define CHV_TX_DW5(ch, lane) _TXLANE(ch, lane, 0x94)
+#define CHV_TX_DW6(ch, lane) _TXLANE(ch, lane, 0x98)
+#define CHV_TX_DW7(ch, lane) _TXLANE(ch, lane, 0x9c)
+#define CHV_TX_DW8(ch, lane) _TXLANE(ch, lane, 0xa0)
+#define CHV_TX_DW9(ch, lane) _TXLANE(ch, lane, 0xa4)
+#define CHV_TX_DW10(ch, lane) _TXLANE(ch, lane, 0xa8)
+#define CHV_TX_DW11(ch, lane) _TXLANE(ch, lane, 0xac)
+#define DPIO_FRC_LATENCY_SHFIT 8
+#define CHV_TX_DW14(ch, lane) _TXLANE(ch, lane, 0xb8)
+#define DPIO_UPAR_SHIFT 30
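A worked example of the _TXLANE() addressing above, for reference: lanes are strided 0x200 apart and the second channel sits at +0x2400, so (illustrative only):

	CHV_TX_DW2(1, 2)   /* 0x2400 + 2*0x200 + 0x88 == 0x2888 */
	CHV_TX_DW2(0, 0)   /* 0x0000 + 0*0x200 + 0x88 == 0x0088 */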
/*
* Fence registers
*/
@@ -786,9 +972,20 @@
#define RING_MAX_IDLE(base) ((base)+0x54)
#define RING_HWS_PGA(base) ((base)+0x80)
#define RING_HWS_PGA_GEN6(base) ((base)+0x2080)
-#define ARB_MODE 0x04030
+
+#define GEN7_WR_WATERMARK 0x4028
+#define GEN7_GFX_PRIO_CTRL 0x402C
+#define ARB_MODE 0x4030
#define ARB_MODE_SWIZZLE_SNB (1<<4)
#define ARB_MODE_SWIZZLE_IVB (1<<5)
+#define GEN7_GFX_PEND_TLB0 0x4034
+#define GEN7_GFX_PEND_TLB1 0x4038
+/* L3, CVS, ZTLB, RCC, CASC LRA min, max values */
+#define GEN7_LRA_LIMITS_BASE 0x403C
+#define GEN7_LRA_LIMITS_REG_NUM 13
+#define GEN7_MEDIA_MAX_REQ_COUNT 0x4070
+#define GEN7_GFX_MAX_REQ_COUNT 0x4074
+
#define GAMTARBMODE 0x04a08
#define ARB_MODE_BWGTLB_DISABLE (1<<9)
#define ARB_MODE_SWIZZLE_BDW (1<<1)
@@ -823,6 +1020,9 @@
#define RING_WAIT_I8XX (1<<0) /* gen2, PRBx_HEAD */
#define RING_WAIT (1<<11) /* gen3+, PRBx_CTL */
#define RING_WAIT_SEMAPHORE (1<<10) /* gen6+ */
+
+#define GEN7_TLB_RD_ADDR 0x4700
+
#if 0
#define PRB0_TAIL 0x02030
#define PRB0_HEAD 0x02034
@@ -948,14 +1148,19 @@
#define GFX_PPGTT_ENABLE (1<<9)
#define VLV_DISPLAY_BASE 0x180000
+#define VLV_MIPI_BASE VLV_DISPLAY_BASE
+#define VLV_GU_CTL0 (VLV_DISPLAY_BASE + 0x2030)
+#define VLV_GU_CTL1 (VLV_DISPLAY_BASE + 0x2034)
#define SCPD0 0x0209c /* 915+ only */
#define IER 0x020a0
#define IIR 0x020a4
#define IMR 0x020a8
#define ISR 0x020ac
#define VLV_GUNIT_CLOCK_GATE (VLV_DISPLAY_BASE + 0x2060)
+#define GINT_DIS (1<<22)
#define GCFG_DIS (1<<8)
+#define VLV_GUNIT_CLOCK_GATE2 (VLV_DISPLAY_BASE + 0x2064)
#define VLV_IIR_RW (VLV_DISPLAY_BASE + 0x2084)
#define VLV_IER (VLV_DISPLAY_BASE + 0x20a0)
#define VLV_IIR (VLV_DISPLAY_BASE + 0x20a4)
@@ -1084,6 +1289,7 @@
#define GEN6_RC_SLEEP_PSMI_CONTROL 0x2050
#define GEN8_RC_SEMA_IDLE_MSG_DISABLE (1 << 12)
+#define GEN8_FF_DOP_CLOCK_GATE_DISABLE (1<<10)
#define GEN6_BSD_SLEEP_PSMI_CONTROL 0x12050
#define GEN6_BSD_SLEEP_MSG_DISABLE (1 << 0)
@@ -1124,24 +1330,43 @@
/* These are all the "old" interrupts */
#define ILK_BSD_USER_INTERRUPT (1<<5)
+
+#define I915_PM_INTERRUPT (1<<31)
+#define I915_ISP_INTERRUPT (1<<22)
+#define I915_LPE_PIPE_B_INTERRUPT (1<<21)
+#define I915_LPE_PIPE_A_INTERRUPT (1<<20)
+#define I915_MIPIB_INTERRUPT (1<<19)
+#define I915_MIPIA_INTERRUPT (1<<18)
#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18)
#define I915_DISPLAY_PORT_INTERRUPT (1<<17)
+#define I915_DISPLAY_PIPE_C_HBLANK_INTERRUPT (1<<16)
+#define I915_MASTER_ERROR_INTERRUPT (1<<15)
#define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1<<15)
+#define I915_DISPLAY_PIPE_B_HBLANK_INTERRUPT (1<<14)
#define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT (1<<14) /* p-state */
+#define I915_DISPLAY_PIPE_A_HBLANK_INTERRUPT (1<<13)
#define I915_HWB_OOM_INTERRUPT (1<<13)
+#define I915_LPE_PIPE_C_INTERRUPT (1<<12)
#define I915_SYNC_STATUS_INTERRUPT (1<<12)
+#define I915_MISC_INTERRUPT (1<<11)
#define I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT (1<<11)
+#define I915_DISPLAY_PIPE_C_VBLANK_INTERRUPT (1<<10)
#define I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT (1<<10)
+#define I915_DISPLAY_PIPE_C_EVENT_INTERRUPT (1<<9)
#define I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT (1<<9)
+#define I915_DISPLAY_PIPE_C_DPBM_INTERRUPT (1<<8)
#define I915_DISPLAY_PLANE_C_FLIP_PENDING_INTERRUPT (1<<8)
#define I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT (1<<7)
#define I915_DISPLAY_PIPE_A_EVENT_INTERRUPT (1<<6)
#define I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT (1<<5)
#define I915_DISPLAY_PIPE_B_EVENT_INTERRUPT (1<<4)
+#define I915_DISPLAY_PIPE_A_DPBM_INTERRUPT (1<<3)
+#define I915_DISPLAY_PIPE_B_DPBM_INTERRUPT (1<<2)
#define I915_DEBUG_INTERRUPT (1<<2)
+#define I915_WINVALID_INTERRUPT (1<<1)
#define I915_USER_INTERRUPT (1<<1)
#define I915_ASLE_INTERRUPT (1<<0)
-#define I915_BSD_USER_INTERRUPT (1 << 25)
+#define I915_BSD_USER_INTERRUPT (1<<25)
#define GEN6_BSD_RNCID 0x12198
@@ -1298,6 +1523,7 @@
#define GMBUS_PORT_SSC 1
#define GMBUS_PORT_VGADDC 2
#define GMBUS_PORT_PANEL 3
+#define GMBUS_PORT_DPD_CHV 3 /* HDMID_CHV */
#define GMBUS_PORT_DPC 4 /* HDMIC */
#define GMBUS_PORT_DPB 5 /* SDVO, HDMIB */
#define GMBUS_PORT_DPD 6 /* HDMID */
@@ -1339,6 +1565,7 @@
*/
#define DPLL_A_OFFSET 0x6014
#define DPLL_B_OFFSET 0x6018
+#define CHV_DPLL_C_OFFSET 0x6030
#define DPLL(pipe) (dev_priv->info.dpll_offsets[pipe] + \
dev_priv->info.display_mmio_offset)
@@ -1373,10 +1600,23 @@
#define DPLL_LOCK_VLV (1<<15)
#define DPLL_INTEGRATED_CRI_CLK_VLV (1<<14)
#define DPLL_INTEGRATED_CLOCK_VLV (1<<13)
+#define DPLL_SSC_REF_CLOCK_CHV (1<<13)
#define DPLL_PORTC_READY_MASK (0xf << 4)
#define DPLL_PORTB_READY_MASK (0xf)
#define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000
+
+/* Additional CHV pll/phy registers */
+#define DPIO_PHY_STATUS (VLV_DISPLAY_BASE + 0x6240)
+#define DPLL_PORTD_READY_MASK (0xf)
+#define DISPLAY_PHY_CONTROL (VLV_DISPLAY_BASE + 0x60100)
+#define PHY_COM_LANE_RESET_DEASSERT(phy, val) \
+ ((phy == DPIO_PHY0) ? (val | 1) : (val | 2))
+#define PHY_COM_LANE_RESET_ASSERT(phy, val) \
+ ((phy == DPIO_PHY0) ? (val & ~1) : (val & ~2))
+#define DISPLAY_PHY_STATUS (VLV_DISPLAY_BASE + 0x60104)
+#define PHY_POWERGOOD(phy) ((phy == DPIO_PHY0) ? (1<<31) : (1<<30))
+
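A minimal sketch of the bring-up handshake these macros support, assuming the driver's usual I915_READ/I915_WRITE and wait_for() helpers (the real sequence added by this patch lives in intel_reset_dpio() further down):

	/* Illustrative only: wait for power-good, then release the
	 * common-lane reset for this PHY. For DPIO_PHY0 the deassert
	 * macro evaluates to (val | 1), for the other PHY to (val | 2). */
	if (wait_for(I915_READ(DISPLAY_PHY_STATUS) & PHY_POWERGOOD(phy), 1))
		DRM_ERROR("Display PHY %d is not powered up\n", phy);
	val = I915_READ(DISPLAY_PHY_CONTROL);
	I915_WRITE(DISPLAY_PHY_CONTROL, PHY_COM_LANE_RESET_DEASSERT(phy, val));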
/*
* The i830 generation, in LVDS mode, defines P1 as the bit number set within
* this field (only one bit may be set).
@@ -1417,6 +1657,7 @@
#define DPLL_A_MD_OFFSET 0x601c /* 965+ only */
#define DPLL_B_MD_OFFSET 0x6020 /* 965+ only */
+#define CHV_DPLL_C_MD_OFFSET 0x603c
#define DPLL_MD(pipe) (dev_priv->info.dpll_md_offsets[pipe] + \
dev_priv->info.display_mmio_offset)
@@ -1516,7 +1757,7 @@
# define DPIOUNIT_CLOCK_GATE_DISABLE (1 << 6) /* 915-945 */
# define OVFUNIT_CLOCK_GATE_DISABLE (1 << 5)
# define OVBUNIT_CLOCK_GATE_DISABLE (1 << 4)
-/**
+/*
* This bit must be set on the 830 to prevent hangs when turning off the
* overlay scaler.
*/
@@ -1536,12 +1777,12 @@
# define COLOR_CALCULATOR_CLOCK_GATE_DISABLE (1 << 7)
# define MOTION_COMP_CLOCK_GATE_DISABLE (1 << 6)
# define MAG_CLOCK_GATE_DISABLE (1 << 5)
-/** This bit must be unset on 855,865 */
+/* This bit must be unset on 855,865 */
# define MECI_CLOCK_GATE_DISABLE (1 << 4)
# define DCMP_CLOCK_GATE_DISABLE (1 << 3)
# define MEC_CLOCK_GATE_DISABLE (1 << 2)
# define MECO_CLOCK_GATE_DISABLE (1 << 1)
-/** This bit must be set on 855,865. */
+/* This bit must be set on 855,865. */
# define SV_CLOCK_GATE_DISABLE (1 << 0)
# define I915_MPEG_CLOCK_GATE_DISABLE (1 << 16)
# define I915_VLD_IP_PR_CLOCK_GATE_DISABLE (1 << 15)
@@ -1562,14 +1803,14 @@
# define I915_BY_CLOCK_GATE_DISABLE (1 << 0)
# define I965_RCZ_CLOCK_GATE_DISABLE (1 << 30)
-/** This bit must always be set on 965G/965GM */
+/* This bit must always be set on 965G/965GM */
# define I965_RCC_CLOCK_GATE_DISABLE (1 << 29)
# define I965_RCPB_CLOCK_GATE_DISABLE (1 << 28)
# define I965_DAP_CLOCK_GATE_DISABLE (1 << 27)
# define I965_ROC_CLOCK_GATE_DISABLE (1 << 26)
# define I965_GW_CLOCK_GATE_DISABLE (1 << 25)
# define I965_TD_CLOCK_GATE_DISABLE (1 << 24)
-/** This bit must always be set on 965G */
+/* This bit must always be set on 965G */
# define I965_ISC_CLOCK_GATE_DISABLE (1 << 23)
# define I965_IC_CLOCK_GATE_DISABLE (1 << 22)
# define I965_EU_CLOCK_GATE_DISABLE (1 << 21)
@@ -1594,6 +1835,10 @@
#define VF_UNIT_CLOCK_GATE_DISABLE (1 << 9)
#define GS_UNIT_CLOCK_GATE_DISABLE (1 << 7)
#define CL_UNIT_CLOCK_GATE_DISABLE (1 << 6)
+
+#define VDECCLK_GATE_D 0x620C /* g4x only */
+#define VCP_UNIT_CLOCK_GATE_DISABLE (1 << 4)
+
#define RAMCLK_GATE_D 0x6210 /* CRL only */
#define DEUC 0x6214 /* CRL only */
@@ -1613,6 +1858,7 @@
*/
#define PALETTE_A_OFFSET 0xa000
#define PALETTE_B_OFFSET 0xa800
+#define CHV_PALETTE_C_OFFSET 0xc000
#define PALETTE(pipe) (dev_priv->info.palette_offsets[pipe] + \
dev_priv->info.display_mmio_offset)
@@ -1635,7 +1881,7 @@
/* Memory controller frequency in MCHBAR for Haswell (possible SNB+) */
#define DCLK (MCHBAR_MIRROR_BASE_SNB + 0x5e04)
-/** 915-945 and GM965 MCH register controlling DRAM channel access */
+/* 915-945 and GM965 MCH register controlling DRAM channel access */
#define DCC 0x10200
#define DCC_ADDRESSING_MODE_SINGLE_CHANNEL (0 << 0)
#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC (1 << 0)
@@ -1644,15 +1890,15 @@
#define DCC_CHANNEL_XOR_DISABLE (1 << 10)
#define DCC_CHANNEL_XOR_BIT_17 (1 << 9)
-/** Pineview MCH register contains DDR3 setting */
+/* Pineview MCH register contains DDR3 setting */
#define CSHRDDR3CTL 0x101a8
#define CSHRDDR3CTL_DDR3 (1 << 2)
-/** 965 MCH register controlling DRAM channel configuration */
+/* 965 MCH register controlling DRAM channel configuration */
#define C0DRB3 0x10206
#define C1DRB3 0x10606
-/** snb MCH registers for reading the DRAM channel configuration */
+/* snb MCH registers for reading the DRAM channel configuration */
#define MAD_DIMM_C0 (MCHBAR_MIRROR_BASE_SNB + 0x5004)
#define MAD_DIMM_C1 (MCHBAR_MIRROR_BASE_SNB + 0x5008)
#define MAD_DIMM_C2 (MCHBAR_MIRROR_BASE_SNB + 0x500C)
@@ -1674,7 +1920,7 @@
#define MAD_DIMM_A_SIZE_SHIFT 0
#define MAD_DIMM_A_SIZE_MASK (0xff << MAD_DIMM_A_SIZE_SHIFT)
-/** snb MCH registers for priority tuning */
+/* snb MCH registers for priority tuning */
#define MCH_SSKPD (MCHBAR_MIRROR_BASE_SNB + 0x5d10)
#define MCH_SSKPD_WM0_MASK 0x3f
#define MCH_SSKPD_WM0_VAL 0xc
@@ -2102,6 +2348,7 @@
#define TRANSCODER_A_OFFSET 0x60000
#define TRANSCODER_B_OFFSET 0x61000
#define TRANSCODER_C_OFFSET 0x62000
+#define CHV_TRANSCODER_C_OFFSET 0x63000
#define TRANSCODER_EDP_OFFSET 0x6f000
#define _TRANSCODER2(pipe, reg) (dev_priv->info.trans_offsets[(pipe)] - \
@@ -2326,6 +2573,7 @@
#define GEN3_SDVOC 0x61160
#define GEN4_HDMIB GEN3_SDVOB
#define GEN4_HDMIC GEN3_SDVOC
+#define CHV_HDMID 0x6116C
#define PCH_SDVOB 0xe1140
#define PCH_HDMIB PCH_SDVOB
#define PCH_HDMIC 0xe1150
@@ -2346,7 +2594,7 @@
#define SDVO_PIPE_B_SELECT (1 << 30)
#define SDVO_STALL_SELECT (1 << 29)
#define SDVO_INTERRUPT_ENABLE (1 << 26)
-/**
+/*
* 915G/GM SDVO pixel multiplier.
* Programmed value is multiplier - 1, up to 5x.
* \sa DPLL_MD_UDI_MULTIPLIER_MASK
@@ -2386,6 +2634,10 @@
#define SDVO_PIPE_SEL_CPT(pipe) ((pipe) << 29)
#define SDVO_PIPE_SEL_MASK_CPT (3 << 29)
+/* CHV SDVO/HDMI bits: */
+#define SDVO_PIPE_SEL_CHV(pipe) ((pipe) << 24)
+#define SDVO_PIPE_SEL_MASK_CHV (3 << 24)
+
/* DVO port control */
#define DVOA 0x61120
@@ -2656,65 +2908,65 @@
/* TV port control */
#define TV_CTL 0x68000
-/** Enables the TV encoder */
+/* Enables the TV encoder */
# define TV_ENC_ENABLE (1 << 31)
-/** Sources the TV encoder input from pipe B instead of A. */
+/* Sources the TV encoder input from pipe B instead of A. */
# define TV_ENC_PIPEB_SELECT (1 << 30)
-/** Outputs composite video (DAC A only) */
+/* Outputs composite video (DAC A only) */
# define TV_ENC_OUTPUT_COMPOSITE (0 << 28)
-/** Outputs SVideo video (DAC B/C) */
+/* Outputs SVideo video (DAC B/C) */
# define TV_ENC_OUTPUT_SVIDEO (1 << 28)
-/** Outputs Component video (DAC A/B/C) */
+/* Outputs Component video (DAC A/B/C) */
# define TV_ENC_OUTPUT_COMPONENT (2 << 28)
-/** Outputs Composite and SVideo (DAC A/B/C) */
+/* Outputs Composite and SVideo (DAC A/B/C) */
# define TV_ENC_OUTPUT_SVIDEO_COMPOSITE (3 << 28)
# define TV_TRILEVEL_SYNC (1 << 21)
-/** Enables slow sync generation (945GM only) */
+/* Enables slow sync generation (945GM only) */
# define TV_SLOW_SYNC (1 << 20)
-/** Selects 4x oversampling for 480i and 576p */
+/* Selects 4x oversampling for 480i and 576p */
# define TV_OVERSAMPLE_4X (0 << 18)
-/** Selects 2x oversampling for 720p and 1080i */
+/* Selects 2x oversampling for 720p and 1080i */
# define TV_OVERSAMPLE_2X (1 << 18)
-/** Selects no oversampling for 1080p */
+/* Selects no oversampling for 1080p */
# define TV_OVERSAMPLE_NONE (2 << 18)
-/** Selects 8x oversampling */
+/* Selects 8x oversampling */
# define TV_OVERSAMPLE_8X (3 << 18)
-/** Selects progressive mode rather than interlaced */
+/* Selects progressive mode rather than interlaced */
# define TV_PROGRESSIVE (1 << 17)
-/** Sets the colorburst to PAL mode. Required for non-M PAL modes. */
+/* Sets the colorburst to PAL mode. Required for non-M PAL modes. */
# define TV_PAL_BURST (1 << 16)
-/** Field for setting delay of Y compared to C */
+/* Field for setting delay of Y compared to C */
# define TV_YC_SKEW_MASK (7 << 12)
-/** Enables a fix for 480p/576p standard definition modes on the 915GM only */
+/* Enables a fix for 480p/576p standard definition modes on the 915GM only */
# define TV_ENC_SDP_FIX (1 << 11)
-/**
+/*
* Enables a fix for the 915GM only.
*
* Not sure what it does.
*/
# define TV_ENC_C0_FIX (1 << 10)
-/** Bits that must be preserved by software */
+/* Bits that must be preserved by software */
# define TV_CTL_SAVE ((1 << 11) | (3 << 9) | (7 << 6) | 0xf)
# define TV_FUSE_STATE_MASK (3 << 4)
-/** Read-only state that reports all features enabled */
+/* Read-only state that reports all features enabled */
# define TV_FUSE_STATE_ENABLED (0 << 4)
-/** Read-only state that reports that Macrovision is disabled in hardware*/
+/* Read-only state that reports that Macrovision is disabled in hardware*/
# define TV_FUSE_STATE_NO_MACROVISION (1 << 4)
-/** Read-only state that reports that TV-out is disabled in hardware. */
+/* Read-only state that reports that TV-out is disabled in hardware. */
# define TV_FUSE_STATE_DISABLED (2 << 4)
-/** Normal operation */
+/* Normal operation */
# define TV_TEST_MODE_NORMAL (0 << 0)
-/** Encoder test pattern 1 - combo pattern */
+/* Encoder test pattern 1 - combo pattern */
# define TV_TEST_MODE_PATTERN_1 (1 << 0)
-/** Encoder test pattern 2 - full screen vertical 75% color bars */
+/* Encoder test pattern 2 - full screen vertical 75% color bars */
# define TV_TEST_MODE_PATTERN_2 (2 << 0)
-/** Encoder test pattern 3 - full screen horizontal 75% color bars */
+/* Encoder test pattern 3 - full screen horizontal 75% color bars */
# define TV_TEST_MODE_PATTERN_3 (3 << 0)
-/** Encoder test pattern 4 - random noise */
+/* Encoder test pattern 4 - random noise */
# define TV_TEST_MODE_PATTERN_4 (4 << 0)
-/** Encoder test pattern 5 - linear color ramps */
+/* Encoder test pattern 5 - linear color ramps */
# define TV_TEST_MODE_PATTERN_5 (5 << 0)
-/**
+/*
* This test mode forces the DACs to 50% of full output.
*
* This is used for load detection in combination with TVDAC_SENSE_MASK
@@ -2724,35 +2976,35 @@
#define TV_DAC 0x68004
# define TV_DAC_SAVE 0x00ffff00
-/**
+/*
* Reports that DAC state change logic has reported change (RO).
*
* This gets cleared when TV_DAC_STATE_EN is cleared
*/
# define TVDAC_STATE_CHG (1 << 31)
# define TVDAC_SENSE_MASK (7 << 28)
-/** Reports that DAC A voltage is above the detect threshold */
+/* Reports that DAC A voltage is above the detect threshold */
# define TVDAC_A_SENSE (1 << 30)
-/** Reports that DAC B voltage is above the detect threshold */
+/* Reports that DAC B voltage is above the detect threshold */
# define TVDAC_B_SENSE (1 << 29)
-/** Reports that DAC C voltage is above the detect threshold */
+/* Reports that DAC C voltage is above the detect threshold */
# define TVDAC_C_SENSE (1 << 28)
-/**
+/*
* Enables DAC state detection logic, for load-based TV detection.
*
* The PLL of the chosen pipe (in TV_CTL) must be running, and the encoder set
* to off, for load detection to work.
*/
# define TVDAC_STATE_CHG_EN (1 << 27)
-/** Sets the DAC A sense value to high */
+/* Sets the DAC A sense value to high */
# define TVDAC_A_SENSE_CTL (1 << 26)
-/** Sets the DAC B sense value to high */
+/* Sets the DAC B sense value to high */
# define TVDAC_B_SENSE_CTL (1 << 25)
-/** Sets the DAC C sense value to high */
+/* Sets the DAC C sense value to high */
# define TVDAC_C_SENSE_CTL (1 << 24)
-/** Overrides the ENC_ENABLE and DAC voltage levels */
+/* Overrides the ENC_ENABLE and DAC voltage levels */
# define DAC_CTL_OVERRIDE (1 << 7)
-/** Sets the slew rate. Must be preserved in software */
+/* Sets the slew rate. Must be preserved in software */
# define ENC_TVDAC_SLEW_FAST (1 << 6)
# define DAC_A_1_3_V (0 << 4)
# define DAC_A_1_1_V (1 << 4)
@@ -2767,7 +3019,7 @@
# define DAC_C_0_7_V (2 << 0)
# define DAC_C_MASK (3 << 0)
-/**
+/*
* CSC coefficients are stored in a floating point format with 9 bits of
* mantissa and 2 or 3 bits of exponent. The exponent is represented as 2**-n,
* where 2-bit exponents are unsigned n, and 3-bit exponents are signed n with
@@ -2782,7 +3034,7 @@
#define TV_CSC_Y2 0x68014
# define TV_BY_MASK 0x07ff0000
# define TV_BY_SHIFT 16
-/**
+/*
* Y attenuation for component video.
*
* Stored in 1.9 fixed point.
@@ -2799,7 +3051,7 @@
#define TV_CSC_U2 0x6801c
# define TV_BU_MASK 0x07ff0000
# define TV_BU_SHIFT 16
-/**
+/*
* U attenuation for component video.
*
* Stored in 1.9 fixed point.
@@ -2816,7 +3068,7 @@
#define TV_CSC_V2 0x68024
# define TV_BV_MASK 0x07ff0000
# define TV_BV_SHIFT 16
-/**
+/*
* V attenuation for component video.
*
* Stored in 1.9 fixed point.
@@ -2825,74 +3077,74 @@
# define TV_AV_SHIFT 0
#define TV_CLR_KNOBS 0x68028
-/** 2s-complement brightness adjustment */
+/* 2s-complement brightness adjustment */
# define TV_BRIGHTNESS_MASK 0xff000000
# define TV_BRIGHTNESS_SHIFT 24
-/** Contrast adjustment, as a 2.6 unsigned floating point number */
+/* Contrast adjustment, as a 2.6 unsigned floating point number */
# define TV_CONTRAST_MASK 0x00ff0000
# define TV_CONTRAST_SHIFT 16
-/** Saturation adjustment, as a 2.6 unsigned floating point number */
+/* Saturation adjustment, as a 2.6 unsigned floating point number */
# define TV_SATURATION_MASK 0x0000ff00
# define TV_SATURATION_SHIFT 8
-/** Hue adjustment, as an integer phase angle in degrees */
+/* Hue adjustment, as an integer phase angle in degrees */
# define TV_HUE_MASK 0x000000ff
# define TV_HUE_SHIFT 0
#define TV_CLR_LEVEL 0x6802c
-/** Controls the DAC level for black */
+/* Controls the DAC level for black */
# define TV_BLACK_LEVEL_MASK 0x01ff0000
# define TV_BLACK_LEVEL_SHIFT 16
-/** Controls the DAC level for blanking */
+/* Controls the DAC level for blanking */
# define TV_BLANK_LEVEL_MASK 0x000001ff
# define TV_BLANK_LEVEL_SHIFT 0
#define TV_H_CTL_1 0x68030
-/** Number of pixels in the hsync. */
+/* Number of pixels in the hsync. */
# define TV_HSYNC_END_MASK 0x1fff0000
# define TV_HSYNC_END_SHIFT 16
-/** Total number of pixels minus one in the line (display and blanking). */
+/* Total number of pixels minus one in the line (display and blanking). */
# define TV_HTOTAL_MASK 0x00001fff
# define TV_HTOTAL_SHIFT 0
#define TV_H_CTL_2 0x68034
-/** Enables the colorburst (needed for non-component color) */
+/* Enables the colorburst (needed for non-component color) */
# define TV_BURST_ENA (1 << 31)
-/** Offset of the colorburst from the start of hsync, in pixels minus one. */
+/* Offset of the colorburst from the start of hsync, in pixels minus one. */
# define TV_HBURST_START_SHIFT 16
# define TV_HBURST_START_MASK 0x1fff0000
-/** Length of the colorburst */
+/* Length of the colorburst */
# define TV_HBURST_LEN_SHIFT 0
# define TV_HBURST_LEN_MASK 0x0001fff
#define TV_H_CTL_3 0x68038
-/** End of hblank, measured in pixels minus one from start of hsync */
+/* End of hblank, measured in pixels minus one from start of hsync */
# define TV_HBLANK_END_SHIFT 16
# define TV_HBLANK_END_MASK 0x1fff0000
-/** Start of hblank, measured in pixels minus one from start of hsync */
+/* Start of hblank, measured in pixels minus one from start of hsync */
# define TV_HBLANK_START_SHIFT 0
# define TV_HBLANK_START_MASK 0x0001fff
#define TV_V_CTL_1 0x6803c
-/** XXX */
+/* XXX */
# define TV_NBR_END_SHIFT 16
# define TV_NBR_END_MASK 0x07ff0000
-/** XXX */
+/* XXX */
# define TV_VI_END_F1_SHIFT 8
# define TV_VI_END_F1_MASK 0x00003f00
-/** XXX */
+/* XXX */
# define TV_VI_END_F2_SHIFT 0
# define TV_VI_END_F2_MASK 0x0000003f
#define TV_V_CTL_2 0x68040
-/** Length of vsync, in half lines */
+/* Length of vsync, in half lines */
# define TV_VSYNC_LEN_MASK 0x07ff0000
# define TV_VSYNC_LEN_SHIFT 16
-/** Offset of the start of vsync in field 1, measured in one less than the
+/* Offset of the start of vsync in field 1, measured in one less than the
* number of half lines.
*/
# define TV_VSYNC_START_F1_MASK 0x00007f00
# define TV_VSYNC_START_F1_SHIFT 8
-/**
+/*
* Offset of the start of vsync in field 2, measured in one less than the
* number of half lines.
*/
@@ -2900,17 +3152,17 @@
# define TV_VSYNC_START_F2_SHIFT 0
#define TV_V_CTL_3 0x68044
-/** Enables generation of the equalization signal */
+/* Enables generation of the equalization signal */
# define TV_EQUAL_ENA (1 << 31)
-/** Length of vsync, in half lines */
+/* Length of vsync, in half lines */
# define TV_VEQ_LEN_MASK 0x007f0000
# define TV_VEQ_LEN_SHIFT 16
-/** Offset of the start of equalization in field 1, measured in one less than
+/* Offset of the start of equalization in field 1, measured in one less than
* the number of half lines.
*/
# define TV_VEQ_START_F1_MASK 0x0007f00
# define TV_VEQ_START_F1_SHIFT 8
-/**
+/*
* Offset of the start of equalization in field 2, measured in one less than
* the number of half lines.
*/
@@ -2918,13 +3170,13 @@
# define TV_VEQ_START_F2_SHIFT 0
#define TV_V_CTL_4 0x68048
-/**
+/*
* Offset to start of vertical colorburst, measured in one less than the
* number of lines from vertical start.
*/
# define TV_VBURST_START_F1_MASK 0x003f0000
# define TV_VBURST_START_F1_SHIFT 16
-/**
+/*
* Offset to the end of vertical colorburst, measured in one less than the
* number of lines from the start of NBR.
*/
@@ -2932,13 +3184,13 @@
# define TV_VBURST_END_F1_SHIFT 0
#define TV_V_CTL_5 0x6804c
-/**
+/*
* Offset to start of vertical colorburst, measured in one less than the
* number of lines from vertical start.
*/
# define TV_VBURST_START_F2_MASK 0x003f0000
# define TV_VBURST_START_F2_SHIFT 16
-/**
+/*
* Offset to the end of vertical colorburst, measured in one less than the
* number of lines from the start of NBR.
*/
@@ -2946,13 +3198,13 @@
# define TV_VBURST_END_F2_SHIFT 0
#define TV_V_CTL_6 0x68050
-/**
+/*
* Offset to start of vertical colorburst, measured in one less than the
* number of lines from vertical start.
*/
# define TV_VBURST_START_F3_MASK 0x003f0000
# define TV_VBURST_START_F3_SHIFT 16
-/**
+/*
* Offset to the end of vertical colorburst, measured in one less than the
* number of lines from the start of NBR.
*/
@@ -2960,13 +3212,13 @@
# define TV_VBURST_END_F3_SHIFT 0
#define TV_V_CTL_7 0x68054
-/**
+/*
* Offset to start of vertical colorburst, measured in one less than the
* number of lines from vertical start.
*/
# define TV_VBURST_START_F4_MASK 0x003f0000
# define TV_VBURST_START_F4_SHIFT 16
-/**
+/*
* Offset to the end of vertical colorburst, measured in one less than the
* number of lines from the start of NBR.
*/
@@ -2974,56 +3226,56 @@
# define TV_VBURST_END_F4_SHIFT 0
#define TV_SC_CTL_1 0x68060
-/** Turns on the first subcarrier phase generation DDA */
+/* Turns on the first subcarrier phase generation DDA */
# define TV_SC_DDA1_EN (1 << 31)
-/** Turns on the first subcarrier phase generation DDA */
+/* Turns on the second subcarrier phase generation DDA */
# define TV_SC_DDA2_EN (1 << 30)
-/** Turns on the first subcarrier phase generation DDA */
+/* Turns on the third subcarrier phase generation DDA */
# define TV_SC_DDA3_EN (1 << 29)
-/** Sets the subcarrier DDA to reset frequency every other field */
+/* Sets the subcarrier DDA to reset frequency every other field */
# define TV_SC_RESET_EVERY_2 (0 << 24)
-/** Sets the subcarrier DDA to reset frequency every fourth field */
+/* Sets the subcarrier DDA to reset frequency every fourth field */
# define TV_SC_RESET_EVERY_4 (1 << 24)
-/** Sets the subcarrier DDA to reset frequency every eighth field */
+/* Sets the subcarrier DDA to reset frequency every eighth field */
# define TV_SC_RESET_EVERY_8 (2 << 24)
-/** Sets the subcarrier DDA to never reset the frequency */
+/* Sets the subcarrier DDA to never reset the frequency */
# define TV_SC_RESET_NEVER (3 << 24)
-/** Sets the peak amplitude of the colorburst.*/
+/* Sets the peak amplitude of the colorburst. */
# define TV_BURST_LEVEL_MASK 0x00ff0000
# define TV_BURST_LEVEL_SHIFT 16
-/** Sets the increment of the first subcarrier phase generation DDA */
+/* Sets the increment of the first subcarrier phase generation DDA */
# define TV_SCDDA1_INC_MASK 0x00000fff
# define TV_SCDDA1_INC_SHIFT 0
#define TV_SC_CTL_2 0x68064
-/** Sets the rollover for the second subcarrier phase generation DDA */
+/* Sets the rollover for the second subcarrier phase generation DDA */
# define TV_SCDDA2_SIZE_MASK 0x7fff0000
# define TV_SCDDA2_SIZE_SHIFT 16
-/** Sets the increent of the second subcarrier phase generation DDA */
+/* Sets the increment of the second subcarrier phase generation DDA */
# define TV_SCDDA2_INC_MASK 0x00007fff
# define TV_SCDDA2_INC_SHIFT 0
#define TV_SC_CTL_3 0x68068
-/** Sets the rollover for the third subcarrier phase generation DDA */
+/* Sets the rollover for the third subcarrier phase generation DDA */
# define TV_SCDDA3_SIZE_MASK 0x7fff0000
# define TV_SCDDA3_SIZE_SHIFT 16
-/** Sets the increent of the third subcarrier phase generation DDA */
+/* Sets the increment of the third subcarrier phase generation DDA */
# define TV_SCDDA3_INC_MASK 0x00007fff
# define TV_SCDDA3_INC_SHIFT 0
#define TV_WIN_POS 0x68070
-/** X coordinate of the display from the start of horizontal active */
+/* X coordinate of the display from the start of horizontal active */
# define TV_XPOS_MASK 0x1fff0000
# define TV_XPOS_SHIFT 16
-/** Y coordinate of the display from the start of vertical active (NBR) */
+/* Y coordinate of the display from the start of vertical active (NBR) */
# define TV_YPOS_MASK 0x00000fff
# define TV_YPOS_SHIFT 0
#define TV_WIN_SIZE 0x68074
-/** Horizontal size of the display window, measured in pixels*/
+/* Horizontal size of the display window, measured in pixels */
# define TV_XSIZE_MASK 0x1fff0000
# define TV_XSIZE_SHIFT 16
-/**
+/*
* Vertical size of the display window, measured in pixels.
*
* Must be even for interlaced modes.
@@ -3032,28 +3284,28 @@
# define TV_YSIZE_SHIFT 0
#define TV_FILTER_CTL_1 0x68080
-/**
+/*
* Enables automatic scaling calculation.
*
* If set, the rest of the registers are ignored, and the calculated values can
* be read back from the register.
*/
# define TV_AUTO_SCALE (1 << 31)
-/**
+/*
* Disables the vertical filter.
*
* This is required on modes more than 1024 pixels wide */
# define TV_V_FILTER_BYPASS (1 << 29)
-/** Enables adaptive vertical filtering */
+/* Enables adaptive vertical filtering */
# define TV_VADAPT (1 << 28)
# define TV_VADAPT_MODE_MASK (3 << 26)
-/** Selects the least adaptive vertical filtering mode */
+/* Selects the least adaptive vertical filtering mode */
# define TV_VADAPT_MODE_LEAST (0 << 26)
-/** Selects the moderately adaptive vertical filtering mode */
+/* Selects the moderately adaptive vertical filtering mode */
# define TV_VADAPT_MODE_MODERATE (1 << 26)
-/** Selects the most adaptive vertical filtering mode */
+/* Selects the most adaptive vertical filtering mode */
# define TV_VADAPT_MODE_MOST (3 << 26)
-/**
+/*
* Sets the horizontal scaling factor.
*
* This should be the fractional part of the horizontal scaling factor divided
@@ -3065,14 +3317,14 @@
# define TV_HSCALE_FRAC_SHIFT 0
#define TV_FILTER_CTL_2 0x68084
-/**
+/*
* Sets the integer part of the 3.15 fixed-point vertical scaling factor.
*
* TV_VSCALE should be (src height - 1) / ((interlace * dest height) - 1)
*/
# define TV_VSCALE_INT_MASK 0x00038000
# define TV_VSCALE_INT_SHIFT 15
-/**
+/*
* Sets the fractional part of the 3.15 fixed-point vertical scaling factor.
*
* \sa TV_VSCALE_INT_MASK
@@ -3081,7 +3333,7 @@
# define TV_VSCALE_FRAC_SHIFT 0
#define TV_FILTER_CTL_3 0x68088
-/**
+/*
* Sets the integer part of the 3.15 fixed-point vertical scaling factor.
*
* TV_VSCALE should be (src height - 1) / (1/4 * (dest height - 1))
@@ -3090,7 +3342,7 @@
*/
# define TV_VSCALE_IP_INT_MASK 0x00038000
# define TV_VSCALE_IP_INT_SHIFT 15
-/**
+/*
* Sets the fractional part of the 3.15 fixed-point vertical scaling factor.
*
* For progressive modes, TV_VSCALE_IP_INT should be set to zeroes.
@@ -3102,26 +3354,26 @@
#define TV_CC_CONTROL 0x68090
# define TV_CC_ENABLE (1 << 31)
-/**
+/*
* Specifies which field to send the CC data in.
*
* CC data is usually sent in field 0.
*/
# define TV_CC_FID_MASK (1 << 27)
# define TV_CC_FID_SHIFT 27
-/** Sets the horizontal position of the CC data. Usually 135. */
+/* Sets the horizontal position of the CC data. Usually 135. */
# define TV_CC_HOFF_MASK 0x03ff0000
# define TV_CC_HOFF_SHIFT 16
-/** Sets the vertical position of the CC data. Usually 21 */
+/* Sets the vertical position of the CC data. Usually 21 */
# define TV_CC_LINE_MASK 0x0000003f
# define TV_CC_LINE_SHIFT 0
#define TV_CC_DATA 0x68094
# define TV_CC_RDY (1 << 31)
-/** Second word of CC data to be transmitted. */
+/* Second word of CC data to be transmitted. */
# define TV_CC_DATA_2_MASK 0x007f0000
# define TV_CC_DATA_2_SHIFT 16
-/** First word of CC data to be transmitted. */
+/* First word of CC data to be transmitted. */
# define TV_CC_DATA_1_MASK 0x0000007f
# define TV_CC_DATA_1_SHIFT 0
@@ -3143,6 +3395,8 @@
#define DP_PORT_EN (1 << 31)
#define DP_PIPEB_SELECT (1 << 30)
#define DP_PIPE_MASK (1 << 30)
+#define DP_PIPE_SELECT_CHV(pipe) ((pipe) << 16)
+#define DP_PIPE_MASK_CHV (3 << 16)
/* Link training mode - select a suitable mode for each stage */
#define DP_LINK_TRAIN_PAT_1 (0 << 28)
@@ -3190,32 +3444,32 @@
#define DP_PLL_FREQ_160MHZ (1 << 16)
#define DP_PLL_FREQ_MASK (3 << 16)
-/** locked once port is enabled */
+/* locked once port is enabled */
#define DP_PORT_REVERSAL (1 << 15)
/* eDP */
#define DP_PLL_ENABLE (1 << 14)
-/** sends the clock on lane 15 of the PEG for debug */
+/* sends the clock on lane 15 of the PEG for debug */
#define DP_CLOCK_OUTPUT_ENABLE (1 << 13)
#define DP_SCRAMBLING_DISABLE (1 << 12)
#define DP_SCRAMBLING_DISABLE_IRONLAKE (1 << 7)
-/** limit RGB values to avoid confusing TVs */
+/* limit RGB values to avoid confusing TVs */
#define DP_COLOR_RANGE_16_235 (1 << 8)
-/** Turn on the audio link */
+/* Turn on the audio link */
#define DP_AUDIO_OUTPUT_ENABLE (1 << 6)
-/** vs and hs sync polarity */
+/* vs and hs sync polarity */
#define DP_SYNC_VS_HIGH (1 << 4)
#define DP_SYNC_HS_HIGH (1 << 3)
-/** A fantasy */
+/* A fantasy */
#define DP_DETECTED (1 << 2)
-/** The aux channel provides a way to talk to the
+/* The aux channel provides a way to talk to the
* signal sink for DDC etc. Max packet size supported
* is 20 bytes in each direction, hence the 5 fixed
* data registers
@@ -3377,6 +3631,7 @@
#define SPRITE1_FLIP_DONE_INT_EN_VLV (1UL<<30)
#define PIPE_CRC_ERROR_ENABLE (1UL<<29)
#define PIPE_CRC_DONE_ENABLE (1UL<<28)
+#define PERF_COUNTER2_INTERRUPT_EN (1UL<<27)
#define PIPE_GMBUS_EVENT_ENABLE (1UL<<27)
#define PLANE_FLIP_DONE_INT_EN_VLV (1UL<<26)
#define PIPE_HOTPLUG_INTERRUPT_ENABLE (1UL<<26)
@@ -3388,8 +3643,10 @@
#define PIPE_ODD_FIELD_INTERRUPT_ENABLE (1UL<<21)
#define PIPE_EVEN_FIELD_INTERRUPT_ENABLE (1UL<<20)
#define PIPE_B_PSR_INTERRUPT_ENABLE_VLV (1UL<<19)
+#define PERF_COUNTER_INTERRUPT_EN (1UL<<19)
#define PIPE_HOTPLUG_TV_INTERRUPT_ENABLE (1UL<<18) /* pre-965 */
#define PIPE_START_VBLANK_INTERRUPT_ENABLE (1UL<<18) /* 965 or later */
+#define PIPE_FRAMESTART_INTERRUPT_ENABLE (1UL<<17)
#define PIPE_VBLANK_INTERRUPT_ENABLE (1UL<<17)
#define PIPEA_HBLANK_INT_EN_VLV (1UL<<16)
#define PIPE_OVERLAY_UPDATED_ENABLE (1UL<<16)
@@ -3397,6 +3654,7 @@
#define SPRITE0_FLIP_DONE_INT_STATUS_VLV (1UL<<14)
#define PIPE_CRC_ERROR_INTERRUPT_STATUS (1UL<<13)
#define PIPE_CRC_DONE_INTERRUPT_STATUS (1UL<<12)
+#define PERF_COUNTER2_INTERRUPT_STATUS (1UL<<11)
#define PIPE_GMBUS_INTERRUPT_STATUS (1UL<<11)
#define PLANE_FLIP_DONE_INT_STATUS_VLV (1UL<<10)
#define PIPE_HOTPLUG_INTERRUPT_STATUS (1UL<<10)
@@ -3405,20 +3663,25 @@
#define PIPE_DPST_EVENT_STATUS (1UL<<7)
#define PIPE_LEGACY_BLC_EVENT_STATUS (1UL<<6)
#define PIPE_A_PSR_STATUS_VLV (1UL<<6)
#define PIPE_ODD_FIELD_INTERRUPT_STATUS (1UL<<5)
#define PIPE_EVEN_FIELD_INTERRUPT_STATUS (1UL<<4)
#define PIPE_B_PSR_STATUS_VLV (1UL<<3)
+#define PERF_COUNTER_INTERRUPT_STATUS (1UL<<3)
#define PIPE_HOTPLUG_TV_INTERRUPT_STATUS (1UL<<2) /* pre-965 */
#define PIPE_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */
+#define PIPE_FRAMESTART_INTERRUPT_STATUS (1UL<<1)
#define PIPE_VBLANK_INTERRUPT_STATUS (1UL<<1)
+#define PIPE_HBLANK_INT_STATUS (1UL<<0)
#define PIPE_OVERLAY_UPDATED_STATUS (1UL<<0)
#define PIPESTAT_INT_ENABLE_MASK 0x7fff0000
#define PIPESTAT_INT_STATUS_MASK 0x0000ffff
-#define PIPE_A_OFFSET 0x70000
-#define PIPE_B_OFFSET 0x71000
-#define PIPE_C_OFFSET 0x72000
+#define PIPE_A_OFFSET 0x70000
+#define PIPE_B_OFFSET 0x71000
+#define PIPE_C_OFFSET 0x72000
+#define CHV_PIPE_C_OFFSET 0x74000
/*
* There's actually no pipe EDP. Some pipe registers have
* simply shifted from the pipe to the transcoder, while
@@ -3456,14 +3719,25 @@
#define SPRITED_FLIP_DONE_INT_EN (1<<26)
#define SPRITEC_FLIP_DONE_INT_EN (1<<25)
#define PLANEB_FLIP_DONE_INT_EN (1<<24)
+#define PIPE_PSR_INT_EN (1<<22)
#define PIPEA_LINE_COMPARE_INT_EN (1<<21)
#define PIPEA_HLINE_INT_EN (1<<20)
#define PIPEA_VBLANK_INT_EN (1<<19)
#define SPRITEB_FLIP_DONE_INT_EN (1<<18)
#define SPRITEA_FLIP_DONE_INT_EN (1<<17)
#define PLANEA_FLIPDONE_INT_EN (1<<16)
+#define PIPEC_LINE_COMPARE_INT_EN (1<<13)
+#define PIPEC_HLINE_INT_EN (1<<12)
+#define PIPEC_VBLANK_INT_EN (1<<11)
+#define SPRITEF_FLIPDONE_INT_EN (1<<10)
+#define SPRITEE_FLIPDONE_INT_EN (1<<9)
+#define PLANEC_FLIPDONE_INT_EN (1<<8)
-#define DPINVGTT (VLV_DISPLAY_BASE + 0x7002c) /* VLV only */
+#define DPINVGTT (VLV_DISPLAY_BASE + 0x7002c) /* VLV/CHV only */
+#define SPRITEF_INVALID_GTT_INT_EN (1<<27)
+#define SPRITEE_INVALID_GTT_INT_EN (1<<26)
+#define PLANEC_INVALID_GTT_INT_EN (1<<25)
+#define CURSORC_INVALID_GTT_INT_EN (1<<24)
#define CURSORB_INVALID_GTT_INT_EN (1<<23)
#define CURSORA_INVALID_GTT_INT_EN (1<<22)
#define SPRITED_INVALID_GTT_INT_EN (1<<21)
@@ -3473,6 +3747,11 @@
#define SPRITEA_INVALID_GTT_INT_EN (1<<17)
#define PLANEA_INVALID_GTT_INT_EN (1<<16)
#define DPINVGTT_EN_MASK 0xff0000
+#define DPINVGTT_EN_MASK_CHV 0xfff0000
+#define SPRITEF_INVALID_GTT_STATUS (1<<11)
+#define SPRITEE_INVALID_GTT_STATUS (1<<10)
+#define PLANEC_INVALID_GTT_STATUS (1<<9)
+#define CURSORC_INVALID_GTT_STATUS (1<<8)
#define CURSORB_INVALID_GTT_STATUS (1<<7)
#define CURSORA_INVALID_GTT_STATUS (1<<6)
#define SPRITED_INVALID_GTT_STATUS (1<<5)
@@ -3482,6 +3761,7 @@
#define SPRITEA_INVALID_GTT_STATUS (1<<1)
#define PLANEA_INVALID_GTT_STATUS (1<<0)
#define DPINVGTT_STATUS_MASK 0xff
+#define DPINVGTT_STATUS_MASK_CHV 0xfff
#define DSPARB 0x70030
#define DSPARB_CSTART_MASK (0x7f << 7)
@@ -3521,14 +3801,43 @@
#define DDL_CURSORA_PRECISION_32 (1<<31)
#define DDL_CURSORA_PRECISION_16 (0<<31)
#define DDL_CURSORA_SHIFT 24
+#define DDL_SPRITEB_PRECISION_32 (1<<23)
+#define DDL_SPRITEB_PRECISION_16 (0<<23)
+#define DDL_SPRITEB_SHIFT 16
+#define DDL_SPRITEA_PRECISION_32 (1<<15)
+#define DDL_SPRITEA_PRECISION_16 (0<<15)
+#define DDL_SPRITEA_SHIFT 8
#define DDL_PLANEA_PRECISION_32 (1<<7)
#define DDL_PLANEA_PRECISION_16 (0<<7)
+#define DDL_PLANEA_SHIFT 0
+
#define VLV_DDL2 (VLV_DISPLAY_BASE + 0x70054)
#define DDL_CURSORB_PRECISION_32 (1<<31)
#define DDL_CURSORB_PRECISION_16 (0<<31)
#define DDL_CURSORB_SHIFT 24
+#define DDL_SPRITED_PRECISION_32 (1<<23)
+#define DDL_SPRITED_PRECISION_16 (0<<23)
+#define DDL_SPRITED_SHIFT 16
+#define DDL_SPRITEC_PRECISION_32 (1<<15)
+#define DDL_SPRITEC_PRECISION_16 (0<<15)
+#define DDL_SPRITEC_SHIFT 8
#define DDL_PLANEB_PRECISION_32 (1<<7)
#define DDL_PLANEB_PRECISION_16 (0<<7)
+#define DDL_PLANEB_SHIFT 0
+
+#define VLV_DDL3 (VLV_DISPLAY_BASE + 0x70058)
+#define DDL_CURSORC_PRECISION_32 (1<<31)
+#define DDL_CURSORC_PRECISION_16 (0<<31)
+#define DDL_CURSORC_SHIFT 24
+#define DDL_SPRITEF_PRECISION_32 (1<<23)
+#define DDL_SPRITEF_PRECISION_16 (0<<23)
+#define DDL_SPRITEF_SHIFT 16
+#define DDL_SPRITEE_PRECISION_32 (1<<15)
+#define DDL_SPRITEE_PRECISION_16 (0<<15)
+#define DDL_SPRITEE_SHIFT 8
+#define DDL_PLANEC_PRECISION_32 (1<<7)
+#define DDL_PLANEC_PRECISION_16 (0<<7)
+#define DDL_PLANEC_SHIFT 0
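A minimal sketch of how these drain-latency fields are meant to be programmed, with hypothetical names: drain_latency is a precomputed value, and the field is assumed to be the 7 bits below each precision bit:

	/* Illustrative only: 32-entry precision drain latency for sprite A */
	u32 val = I915_READ(VLV_DDL1);
	val &= ~((0x7f << DDL_SPRITEA_SHIFT) | DDL_SPRITEA_PRECISION_32);
	val |= DDL_SPRITEA_PRECISION_32 | (drain_latency << DDL_SPRITEA_SHIFT);
	I915_WRITE(VLV_DDL1, val);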
/* FIFO watermark sizes etc */
#define G4X_FIFO_LINE_SIZE 64
@@ -3639,9 +3948,10 @@
#define _PIPEA_FRMCOUNT_GM45 0x70040
#define _PIPEA_FLIPCOUNT_GM45 0x70044
#define PIPE_FRMCOUNT_GM45(pipe) _PIPE2(pipe, _PIPEA_FRMCOUNT_GM45)
+#define PIPE_FLIPCOUNT_GM45(pipe) _PIPE2(pipe, _PIPEA_FLIPCOUNT_GM45)
/* Cursor A & B regs */
-#define _CURACNTR (dev_priv->info.display_mmio_offset + 0x70080)
+#define _CURACNTR 0x70080
/* Old style CUR*CNTR flags (desktop 8xx) */
#define CURSOR_ENABLE 0x80000000
#define CURSOR_GAMMA_ENABLE 0x40000000
@@ -3668,28 +3978,34 @@
#define MCURSOR_PIPE_B (1 << 28)
#define MCURSOR_GAMMA_ENABLE (1 << 26)
#define CURSOR_TRICKLE_FEED_DISABLE (1 << 14)
-#define _CURABASE (dev_priv->info.display_mmio_offset + 0x70084)
-#define _CURAPOS (dev_priv->info.display_mmio_offset + 0x70088)
+#define _CURABASE 0x70084
+#define _CURAPOS 0x70088
#define CURSOR_POS_MASK 0x007FF
#define CURSOR_POS_SIGN 0x8000
#define CURSOR_X_SHIFT 0
#define CURSOR_Y_SHIFT 16
#define CURSIZE 0x700a0
-#define _CURBCNTR (dev_priv->info.display_mmio_offset + 0x700c0)
-#define _CURBBASE (dev_priv->info.display_mmio_offset + 0x700c4)
-#define _CURBPOS (dev_priv->info.display_mmio_offset + 0x700c8)
+#define _CURBCNTR 0x700c0
+#define _CURBBASE 0x700c4
+#define _CURBPOS 0x700c8
#define _CURBCNTR_IVB 0x71080
#define _CURBBASE_IVB 0x71084
#define _CURBPOS_IVB 0x71088
-#define CURCNTR(pipe) _PIPE(pipe, _CURACNTR, _CURBCNTR)
-#define CURBASE(pipe) _PIPE(pipe, _CURABASE, _CURBBASE)
-#define CURPOS(pipe) _PIPE(pipe, _CURAPOS, _CURBPOS)
+#define _CURSOR2(pipe, reg) (dev_priv->info.cursor_offsets[(pipe)] - \
+ dev_priv->info.cursor_offsets[PIPE_A] + (reg) + \
+ dev_priv->info.display_mmio_offset)
-#define CURCNTR_IVB(pipe) _PIPE(pipe, _CURACNTR, _CURBCNTR_IVB)
-#define CURBASE_IVB(pipe) _PIPE(pipe, _CURABASE, _CURBBASE_IVB)
-#define CURPOS_IVB(pipe) _PIPE(pipe, _CURAPOS, _CURBPOS_IVB)
+#define CURCNTR(pipe) _CURSOR2(pipe, _CURACNTR)
+#define CURBASE(pipe) _CURSOR2(pipe, _CURABASE)
+#define CURPOS(pipe) _CURSOR2(pipe, _CURAPOS)
+
+#define CURSOR_A_OFFSET 0x70080
+#define CURSOR_B_OFFSET 0x700c0
+#define CHV_CURSOR_C_OFFSET 0x700e0
+#define IVB_CURSOR_B_OFFSET 0x71080
+#define IVB_CURSOR_C_OFFSET 0x72080
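A worked example of the _CURSOR2() relocation, assuming the per-platform device-info tables fill cursor_offsets[] with the offsets defined above: on IVB, cursor_offsets[PIPE_B] = 0x71080 and cursor_offsets[PIPE_A] = 0x70080, and the display_mmio_offset term is zero, so

	CURBASE(PIPE_B) == 0x71080 - 0x70080 + 0x70084 == 0x71084

which matches the old _CURBBASE_IVB define this replaces.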
/* Display A control */
#define _DSPACNTR 0x70180
@@ -4194,6 +4510,7 @@
#define GEN8_DE_PIPE_A_IRQ (1<<16)
#define GEN8_DE_PIPE_IRQ(pipe) (1<<(16+pipe))
#define GEN8_GT_VECS_IRQ (1<<6)
+#define GEN8_GT_PM_IRQ (1<<4)
#define GEN8_GT_VCS2_IRQ (1<<3)
#define GEN8_GT_VCS1_IRQ (1<<2)
#define GEN8_GT_BCS_IRQ (1<<1)
@@ -4933,6 +5250,8 @@
#define PORT_TRANS_SEL_CPT(pipe) ((pipe) << 29)
#define PORT_TO_PIPE(val) (((val) & (1<<30)) >> 30)
#define PORT_TO_PIPE_CPT(val) (((val) & PORT_TRANS_SEL_MASK) >> 29)
+#define SDVO_PORT_TO_PIPE_CHV(val) (((val) & (3<<24)) >> 24)
+#define DP_PORT_TO_PIPE_CHV(val) (((val) & (3<<16)) >> 16)
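A small round-trip check (illustrative, not part of the patch) showing that the CHV decode helper above inverts the DP_PIPE_SELECT_CHV() encode added earlier in this file:

	u32 val = DP_PORT_EN | DP_PIPE_SELECT_CHV(PIPE_C);  /* pipe bits = 2 << 16 */
	int pipe = DP_PORT_TO_PIPE_CHV(val);                /* == 2, i.e. PIPE_C */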
#define TRANS_DP_CTL_A 0xe0300
#define TRANS_DP_CTL_B 0xe1300
@@ -4989,6 +5308,8 @@
#define EDP_LINK_TRAIN_VOL_EMP_MASK_IVB (0x3f<<22)
+#define VLV_PMWGICZ 0x1300a4
+
#define FORCEWAKE 0xA18C
#define FORCEWAKE_VLV 0x1300b0
#define FORCEWAKE_ACK_VLV 0x1300b4
@@ -5012,6 +5333,7 @@
#define FORCEWAKE_MT_ACK 0x130040
#define ECOBUS 0xa180
#define FORCEWAKE_MT_ENABLE (1<<5)
+#define VLV_SPAREG2H 0xA194
#define GTFIFODBG 0x120000
#define GT_FIFO_SBDROPERR (1<<6)
@@ -5031,6 +5353,7 @@
#define HSW_EDRAM_PRESENT 0x120010
#define GEN6_UCGCTL1 0x9400
+# define GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE (1 << 16)
# define GEN6_BLBUNIT_CLOCK_GATE_DISABLE (1 << 5)
# define GEN6_CSUNIT_CLOCK_GATE_DISABLE (1 << 7)
@@ -5041,12 +5364,19 @@
# define GEN6_RCPBUNIT_CLOCK_GATE_DISABLE (1 << 12)
# define GEN6_RCCUNIT_CLOCK_GATE_DISABLE (1 << 11)
+#define GEN6_UCGCTL3 0x9408
+
#define GEN7_UCGCTL4 0x940c
#define GEN7_L3BANK2X_CLOCK_GATE_DISABLE (1<<25)
+#define GEN6_RCGCTL1 0x9410
+#define GEN6_RCGCTL2 0x9414
+#define GEN6_RSTCTL 0x9420
+
#define GEN8_UCGCTL6 0x9430
#define GEN8_SDEUNIT_CLOCK_GATE_DISABLE (1<<14)
+#define GEN6_GFXPAUSE 0xA000
#define GEN6_RPNSWREQ 0xA008
#define GEN6_TURBO_DISABLE (1<<31)
#define GEN6_FREQUENCY(x) ((x)<<25)
@@ -5099,6 +5429,9 @@
#define GEN6_RP_UP_EI 0xA068
#define GEN6_RP_DOWN_EI 0xA06C
#define GEN6_RP_IDLE_HYSTERSIS 0xA070
+#define GEN6_RPDEUHWTC 0xA080
+#define GEN6_RPDEUC 0xA084
+#define GEN6_RPDEUCSW 0xA088
#define GEN6_RC_STATE 0xA094
#define GEN6_RC1_WAKE_RATE_LIMIT 0xA098
#define GEN6_RC6_WAKE_RATE_LIMIT 0xA09C
@@ -5106,11 +5439,15 @@
#define GEN6_RC_EVALUATION_INTERVAL 0xA0A8
#define GEN6_RC_IDLE_HYSTERSIS 0xA0AC
#define GEN6_RC_SLEEP 0xA0B0
+#define GEN6_RCUBMABDTMR 0xA0B0
#define GEN6_RC1e_THRESHOLD 0xA0B4
#define GEN6_RC6_THRESHOLD 0xA0B8
#define GEN6_RC6p_THRESHOLD 0xA0BC
+#define VLV_RCEDATA 0xA0BC
#define GEN6_RC6pp_THRESHOLD 0xA0C0
#define GEN6_PMINTRMSK 0xA168
+#define GEN8_PMINTR_REDIRECT_TO_NON_DISP (1<<31)
+#define VLV_PWRDWNUPCTL 0xA294
#define GEN6_PMISR 0x44020
#define GEN6_PMIMR 0x44024 /* rps_lock */
@@ -5127,6 +5464,9 @@
GEN6_PM_RP_DOWN_THRESHOLD | \
GEN6_PM_RP_DOWN_TIMEOUT)
+#define GEN7_GT_SCRATCH_BASE 0x4F100
+#define GEN7_GT_SCRATCH_REG_NUM 8
+
#define VLV_GTLC_SURVIVABILITY_REG 0x130098
#define VLV_GFX_CLK_STATUS_BIT (1<<3)
#define VLV_GFX_CLK_FORCE_ON_BIT (1<<2)
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 56785e8..043123c 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -328,8 +328,6 @@
}
}
- intel_disable_gt_powersave(dev);
-
/* Cache mode state */
if (INTEL_INFO(dev)->gen < 7)
dev_priv->regfile.saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 3620997..86ce39a 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -186,7 +186,7 @@
struct drm_minor *dminor = dev_to_drm_minor(dev);
struct drm_device *drm_dev = dminor->dev;
struct drm_i915_private *dev_priv = drm_dev->dev_private;
- struct i915_hw_context *ctx;
+ struct intel_context *ctx;
u32 *temp = NULL; /* Just here to make handling failures easy */
int slice = (int)(uintptr_t)attr->private;
int ret;
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index b29d7b1..f5aa006 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -326,8 +326,8 @@
);
TRACE_EVENT(i915_gem_ring_sync_to,
- TP_PROTO(struct intel_ring_buffer *from,
- struct intel_ring_buffer *to,
+ TP_PROTO(struct intel_engine_cs *from,
+ struct intel_engine_cs *to,
u32 seqno),
TP_ARGS(from, to, seqno),
@@ -352,7 +352,7 @@
);
TRACE_EVENT(i915_gem_ring_dispatch,
- TP_PROTO(struct intel_ring_buffer *ring, u32 seqno, u32 flags),
+ TP_PROTO(struct intel_engine_cs *ring, u32 seqno, u32 flags),
TP_ARGS(ring, seqno, flags),
TP_STRUCT__entry(
@@ -375,7 +375,7 @@
);
TRACE_EVENT(i915_gem_ring_flush,
- TP_PROTO(struct intel_ring_buffer *ring, u32 invalidate, u32 flush),
+ TP_PROTO(struct intel_engine_cs *ring, u32 invalidate, u32 flush),
TP_ARGS(ring, invalidate, flush),
TP_STRUCT__entry(
@@ -398,7 +398,7 @@
);
DECLARE_EVENT_CLASS(i915_gem_request,
- TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
+ TP_PROTO(struct intel_engine_cs *ring, u32 seqno),
TP_ARGS(ring, seqno),
TP_STRUCT__entry(
@@ -418,12 +418,12 @@
);
DEFINE_EVENT(i915_gem_request, i915_gem_request_add,
- TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
+ TP_PROTO(struct intel_engine_cs *ring, u32 seqno),
TP_ARGS(ring, seqno)
);
TRACE_EVENT(i915_gem_request_complete,
- TP_PROTO(struct intel_ring_buffer *ring),
+ TP_PROTO(struct intel_engine_cs *ring),
TP_ARGS(ring),
TP_STRUCT__entry(
@@ -443,12 +443,12 @@
);
DEFINE_EVENT(i915_gem_request, i915_gem_request_retire,
- TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
+ TP_PROTO(struct intel_engine_cs *ring, u32 seqno),
TP_ARGS(ring, seqno)
);
TRACE_EVENT(i915_gem_request_wait_begin,
- TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
+ TP_PROTO(struct intel_engine_cs *ring, u32 seqno),
TP_ARGS(ring, seqno),
TP_STRUCT__entry(
@@ -477,12 +477,12 @@
);
DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end,
- TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
+ TP_PROTO(struct intel_engine_cs *ring, u32 seqno),
TP_ARGS(ring, seqno)
);
DECLARE_EVENT_CLASS(i915_ring,
- TP_PROTO(struct intel_ring_buffer *ring),
+ TP_PROTO(struct intel_engine_cs *ring),
TP_ARGS(ring),
TP_STRUCT__entry(
@@ -499,12 +499,12 @@
);
DEFINE_EVENT(i915_ring, i915_ring_wait_begin,
- TP_PROTO(struct intel_ring_buffer *ring),
+ TP_PROTO(struct intel_engine_cs *ring),
TP_ARGS(ring)
);
DEFINE_EVENT(i915_ring, i915_ring_wait_end,
- TP_PROTO(struct intel_ring_buffer *ring),
+ TP_PROTO(struct intel_engine_cs *ring),
TP_ARGS(ring)
);
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 22d8347..1fc91df5 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -634,7 +634,7 @@
intel_runtime_pm_get(dev_priv);
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] force=%d\n",
- connector->base.id, drm_get_connector_name(connector),
+ connector->base.id, connector->name,
force);
power_domain = intel_display_port_power_domain(intel_encoder);
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 0ad4e96..b17b9c7 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -364,55 +364,6 @@
DRM_ERROR("FDI link training failed!\n");
}
-static void intel_ddi_mode_set(struct intel_encoder *encoder)
-{
- struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
- int port = intel_ddi_get_encoder_port(encoder);
- int pipe = crtc->pipe;
- int type = encoder->type;
- struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
-
- DRM_DEBUG_KMS("Preparing DDI mode on port %c, pipe %c\n",
- port_name(port), pipe_name(pipe));
-
- crtc->eld_vld = false;
- if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- struct intel_digital_port *intel_dig_port =
- enc_to_dig_port(&encoder->base);
-
- intel_dp->DP = intel_dig_port->saved_port_bits |
- DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW;
- intel_dp->DP |= DDI_PORT_WIDTH(intel_dp->lane_count);
-
- if (intel_dp->has_audio) {
- DRM_DEBUG_DRIVER("DP audio on pipe %c on DDI\n",
- pipe_name(crtc->pipe));
-
- /* write eld */
- DRM_DEBUG_DRIVER("DP audio: write eld information\n");
- intel_write_eld(&encoder->base, adjusted_mode);
- }
- } else if (type == INTEL_OUTPUT_HDMI) {
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
-
- if (intel_hdmi->has_audio) {
- /* Proper support for digital audio needs a new logic
- * and a new set of registers, so we leave it for future
- * patch bombing.
- */
- DRM_DEBUG_DRIVER("HDMI audio on pipe %c on DDI\n",
- pipe_name(crtc->pipe));
-
- /* write eld */
- DRM_DEBUG_DRIVER("HDMI audio: write eld information\n");
- intel_write_eld(&encoder->base, adjusted_mode);
- }
-
- intel_hdmi->set_infoframes(&encoder->base, adjusted_mode);
- }
-}
-
static struct intel_encoder *
intel_ddi_get_crtc_encoder(struct drm_crtc *crtc)
{
@@ -1062,9 +1013,7 @@
}
if (type == INTEL_OUTPUT_HDMI) {
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
-
- if (intel_hdmi->has_hdmi_sink)
+ if (intel_crtc->config.has_hdmi_sink)
temp |= TRANS_DDI_MODE_SELECT_HDMI;
else
temp |= TRANS_DDI_MODE_SELECT_DVI;
@@ -1293,28 +1242,48 @@
static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
{
struct drm_encoder *encoder = &intel_encoder->base;
- struct drm_crtc *crtc = encoder->crtc;
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
enum port port = intel_ddi_get_encoder_port(intel_encoder);
int type = intel_encoder->type;
+ if (crtc->config.has_audio) {
+ DRM_DEBUG_DRIVER("Audio on pipe %c on DDI\n",
+ pipe_name(crtc->pipe));
+
+ /* write eld */
+ DRM_DEBUG_DRIVER("DDI audio: write eld information\n");
+ intel_write_eld(encoder, &crtc->config.adjusted_mode);
+ }
+
if (type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
intel_edp_panel_on(intel_dp);
}
- WARN_ON(intel_crtc->ddi_pll_sel == PORT_CLK_SEL_NONE);
- I915_WRITE(PORT_CLK_SEL(port), intel_crtc->ddi_pll_sel);
+ WARN_ON(crtc->ddi_pll_sel == PORT_CLK_SEL_NONE);
+ I915_WRITE(PORT_CLK_SEL(port), crtc->ddi_pll_sel);
if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_digital_port *intel_dig_port =
+ enc_to_dig_port(encoder);
+
+ intel_dp->DP = intel_dig_port->saved_port_bits |
+ DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW;
+ intel_dp->DP |= DDI_PORT_WIDTH(intel_dp->lane_count);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
intel_dp_start_link_train(intel_dp);
intel_dp_complete_link_train(intel_dp);
if (port != PORT_A)
intel_dp_stop_link_train(intel_dp);
+ } else if (type == INTEL_OUTPUT_HDMI) {
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+
+ intel_hdmi->set_infoframes(encoder,
+ crtc->config.has_hdmi_sink,
+ &crtc->config.adjusted_mode);
}
}
@@ -1385,7 +1354,8 @@
intel_edp_psr_enable(intel_dp);
}
- if (intel_crtc->eld_vld && type != INTEL_OUTPUT_EDP) {
+ if (intel_crtc->config.has_audio) {
+ intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
tmp |= ((AUDIO_OUTPUT_ENABLE_A | AUDIO_ELD_VALID_A) << (pipe * 4));
I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
@@ -1403,11 +1373,14 @@
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t tmp;
- if (intel_crtc->eld_vld && type != INTEL_OUTPUT_EDP) {
+	/* We can't touch HSW_AUD_PIN_ELD_CP_VLD unconditionally because this
+ * register is part of the power well on Haswell. */
+ if (intel_crtc->config.has_audio) {
tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
tmp &= ~((AUDIO_OUTPUT_ENABLE_A | AUDIO_ELD_VALID_A) <<
(pipe * 4));
I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
}
if (type == INTEL_OUTPUT_EDP) {
@@ -1580,6 +1553,7 @@
switch (temp & TRANS_DDI_MODE_SELECT_MASK) {
case TRANS_DDI_MODE_SELECT_HDMI:
+ pipe_config->has_hdmi_sink = true;
case TRANS_DDI_MODE_SELECT_DVI:
case TRANS_DDI_MODE_SELECT_FDI:
break;
@@ -1592,6 +1566,12 @@
break;
}
+ if (intel_display_power_enabled(dev_priv, POWER_DOMAIN_AUDIO)) {
+ temp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
+ if (temp & (AUDIO_OUTPUT_ENABLE_A << (intel_crtc->pipe * 4)))
+ pipe_config->has_audio = true;
+ }
+
if (encoder->type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp_bpp &&
pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
/*
@@ -1708,7 +1688,6 @@
DRM_MODE_ENCODER_TMDS);
intel_encoder->compute_config = intel_ddi_compute_config;
- intel_encoder->mode_set = intel_ddi_mode_set;
intel_encoder->enable = intel_enable_ddi;
intel_encoder->pre_enable = intel_ddi_pre_enable;
intel_encoder->disable = intel_disable_ddi;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 72b5c34..c2976ad 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -41,6 +41,9 @@
#include <drm/drm_crtc_helper.h>
#include <linux/dma_remapping.h>
+#define DIV_ROUND_CLOSEST_ULL(ll, d) \
+ ({ unsigned long long _tmp = (ll)+(d)/2; do_div(_tmp, d); _tmp; })
+
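A short worked example of the rounding behaviour, for reference: do_div() is the kernel helper that divides a 64-bit value in place by a 32-bit divisor, so the macro adds d/2 before dividing to get round-to-nearest:

	DIV_ROUND_CLOSEST_ULL(7, 2)   /* _tmp = 7 + 1 = 8;  8 / 2 == 4 */
	DIV_ROUND_CLOSEST_ULL(5, 2)   /* _tmp = 5 + 1 = 6;  6 / 2 == 3 (half rounds up) */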
static void intel_increase_pllclock(struct drm_crtc *crtc);
static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
@@ -55,6 +58,15 @@
struct intel_framebuffer *ifb,
struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_i915_gem_object *obj);
+static void intel_dp_set_m_n(struct intel_crtc *crtc);
+static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc);
+static void intel_set_pipe_timings(struct intel_crtc *intel_crtc);
+static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
+ struct intel_link_m_n *m_n);
+static void ironlake_set_pipeconf(struct drm_crtc *crtc);
+static void haswell_set_pipeconf(struct drm_crtc *crtc);
+static void intel_set_pipe_csc(struct drm_crtc *crtc);
+static void vlv_prepare_pll(struct intel_crtc *crtc);
typedef struct {
int min, max;
@@ -328,6 +340,22 @@
.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};
+static const intel_limit_t intel_limits_chv = {
+ /*
+ * These are the data rate limits (measured in fast clocks)
+ * since those are the strictest limits we have. The fast
+ * clock and actual rate limits are more relaxed, so checking
+ * them would make no difference.
+ */
+ .dot = { .min = 25000 * 5, .max = 540000 * 5},
+ .vco = { .min = 4860000, .max = 6700000 },
+ .n = { .min = 1, .max = 1 },
+ .m1 = { .min = 2, .max = 2 },
+ .m2 = { .min = 24 << 22, .max = 175 << 22 },
+ .p1 = { .min = 2, .max = 4 },
+ .p2 = { .p2_slow = 1, .p2_fast = 14 },
+};
+
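Note that the .m2 limits above are stored in fixed point with 22 fractional bits, matching the "clock->n << 22" divisor in chv_clock() below, which strips the scaling back out:

	24 << 22    /* == 0x06000000, integer part 24  */
	175 << 22   /* == 0x2bc00000, integer part 175 */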
static void vlv_clock(int refclk, intel_clock_t *clock)
{
clock->m = clock->m1 * clock->m2;
@@ -412,6 +440,8 @@
limit = &intel_limits_pineview_lvds;
else
limit = &intel_limits_pineview_sdvo;
+ } else if (IS_CHERRYVIEW(dev)) {
+ limit = &intel_limits_chv;
} else if (IS_VALLEYVIEW(dev)) {
limit = &intel_limits_vlv;
} else if (!IS_GEN2(dev)) {
@@ -456,6 +486,17 @@
clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
}
+static void chv_clock(int refclk, intel_clock_t *clock)
+{
+ clock->m = clock->m1 * clock->m2;
+ clock->p = clock->p1 * clock->p2;
+ if (WARN_ON(clock->n == 0 || clock->p == 0))
+ return;
+ clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m,
+ clock->n << 22);
+ clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
+}
+
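A worked evaluation of chv_clock() with purely illustrative numbers (the refclk value here is hypothetical, chosen only to land inside the CHV limits above): refclk = 19200, n = 1, m1 = 2, m2 = 150 << 22, p1 = 3, p2 = 2, so m = m1*m2 and p = 6. Then

	vco = (19200 * 2 * (150 << 22)) / (1 << 22) = 19200 * 300 = 5760000
	dot = 5760000 / 6 = 960000

5760000 sits inside the .vco range {4860000, 6700000}, and since chv_find_best_dpll() below multiplies its target by 5 (fast clock), this dot corresponds to a requested rate of 960000 / 5 = 192000.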
#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0)
/**
* Returns whether the given set of divisors are valid for a given refclk with
@@ -731,6 +772,58 @@
return found;
}
+static bool
+chv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
+ int target, int refclk, intel_clock_t *match_clock,
+ intel_clock_t *best_clock)
+{
+ struct drm_device *dev = crtc->dev;
+ intel_clock_t clock;
+ uint64_t m2;
+ int found = false;
+
+ memset(best_clock, 0, sizeof(*best_clock));
+
+ /*
+	 * Per the hardware doc, n is always set to 1 and m1 is always
+	 * set to 2. If we ever need to support a 200MHz refclk, we will
+	 * have to revisit this because n may no longer be 1.
+ */
+ clock.n = 1, clock.m1 = 2;
+ target *= 5; /* fast clock */
+
+ for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
+ for (clock.p2 = limit->p2.p2_fast;
+ clock.p2 >= limit->p2.p2_slow;
+ clock.p2 -= clock.p2 > 10 ? 2 : 1) {
+
+ clock.p = clock.p1 * clock.p2;
+
+ m2 = DIV_ROUND_CLOSEST_ULL(((uint64_t)target * clock.p *
+ clock.n) << 22, refclk * clock.m1);
+
+ if (m2 > INT_MAX/clock.m1)
+ continue;
+
+ clock.m2 = m2;
+
+ chv_clock(refclk, &clock);
+
+ if (!intel_PLL_is_valid(dev, limit, &clock))
+ continue;
+
+			/* Based on hardware requirement, prefer bigger p */
+ if (clock.p > best_clock->p) {
+ *best_clock = clock;
+ found = true;
+ }
+ }
+ }
+
+ return found;
+}
+
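The m2 expression in the loop above is just chv_clock() solved for m2: from dot = refclk*m1*m2 / (p * (n << 22)) we get

	m2 = ((target * p * n) << 22) / (refclk * m1)

which is exactly the DIV_ROUND_CLOSEST_ULL() line, after which chv_clock() recomputes and validates the resulting divisors. The m2 > INT_MAX/clock.m1 guard keeps the later m1*m2 product from overflowing the integer clock fields.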
bool intel_crtc_active(struct drm_crtc *crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -878,7 +971,7 @@
u32 bit;
if (HAS_PCH_IBX(dev_priv->dev)) {
- switch(port->port) {
+ switch (port->port) {
case PORT_B:
bit = SDE_PORTB_HOTPLUG;
break;
@@ -892,7 +985,7 @@
return true;
}
} else {
- switch(port->port) {
+ switch (port->port) {
case PORT_B:
bit = SDE_PORTB_HOTPLUG_CPT;
break;
@@ -1097,10 +1190,8 @@
if (IS_845G(dev) || IS_I865G(dev))
cur_state = I915_READ(_CURACNTR) & CURSOR_ENABLE;
- else if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev))
- cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
else
- cur_state = I915_READ(CURCNTR_IVB(pipe)) & CURSOR_MODE;
+ cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
WARN(cur_state != state,
"cursor on pipe %c assertion failure (expected %s, current %s)\n",
@@ -1253,6 +1344,9 @@
u32 trans_dp_ctl = I915_READ(trans_dp_ctl_reg);
if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
return false;
+ } else if (IS_CHERRYVIEW(dev_priv->dev)) {
+ if ((val & DP_PIPE_MASK_CHV) != DP_PIPE_SELECT_CHV(pipe))
+ return false;
} else {
if ((val & DP_PIPE_MASK) != (pipe << 30))
return false;
@@ -1269,6 +1363,9 @@
if (HAS_PCH_CPT(dev_priv->dev)) {
if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe))
return false;
+ } else if (IS_CHERRYVIEW(dev_priv->dev)) {
+ if ((val & SDVO_PIPE_SEL_MASK_CHV) != SDVO_PIPE_SEL_CHV(pipe))
+ return false;
} else {
if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe))
return false;
@@ -1367,7 +1464,17 @@
if (!IS_VALLEYVIEW(dev))
return;
- DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
+ /*
+ * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
+ * CHV x1 PHY (DP/HDMI D)
+ * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
+ */
+ if (IS_CHERRYVIEW(dev)) {
+ DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
+ DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
+ } else {
+ DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
+ }
}
static void intel_reset_dpio(struct drm_device *dev)
@@ -1385,17 +1492,42 @@
DPLL_REFA_CLK_ENABLE_VLV |
DPLL_INTEGRATED_CRI_CLK_VLV);
- /*
- * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
- * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
- * a. GUnit 0x2110 bit[0] set to 1 (def 0)
- * b. The other bits such as sfr settings / modesel may all be set
- * to 0.
- *
- * This should only be done on init and resume from S3 with both
- * PLLs disabled, or we risk losing DPIO and PLL synchronization.
- */
- I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
+ if (IS_CHERRYVIEW(dev)) {
+ enum dpio_phy phy;
+ u32 val;
+
+ for (phy = DPIO_PHY0; phy < I915_NUM_PHYS_VLV; phy++) {
+ /* Poll for phypwrgood signal */
+ if (wait_for(I915_READ(DISPLAY_PHY_STATUS) &
+ PHY_POWERGOOD(phy), 1))
+ DRM_ERROR("Display PHY %d is not power up\n", phy);
+
+ /*
+ * Deassert common lane reset for PHY.
+ *
+ * This should only be done on init and resume from S3
+ * with both PLLs disabled, or we risk losing DPIO and
+ * PLL synchronization.
+ */
+ val = I915_READ(DISPLAY_PHY_CONTROL);
+ I915_WRITE(DISPLAY_PHY_CONTROL,
+ PHY_COM_LANE_RESET_DEASSERT(phy, val));
+ }
+
+ } else {
+ /*
+ * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
+ * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
+ * a. GUnit 0x2110 bit[0] set to 1 (def 0)
+ * b. The other bits such as sfr settings / modesel may all
+ * be set to 0.
+ *
+ * This should only be done on init and resume from S3 with
+ * both PLLs disabled, or we risk losing DPIO and PLL
+ * synchronization.
+ */
+ I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
+ }
}
static void vlv_enable_pll(struct intel_crtc *crtc)
@@ -1436,6 +1568,44 @@
udelay(150); /* wait for warmup */
}
+static void chv_enable_pll(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe = crtc->pipe;
+ enum dpio_channel port = vlv_pipe_to_channel(pipe);
+ u32 tmp;
+
+ assert_pipe_disabled(dev_priv, crtc->pipe);
+
+ BUG_ON(!IS_CHERRYVIEW(dev_priv->dev));
+
+ mutex_lock(&dev_priv->dpio_lock);
+
+ /* Enable back the 10bit clock to display controller */
+ tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
+ tmp |= DPIO_DCLKP_EN;
+ vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);
+
+ /*
+ * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
+ */
+ udelay(1);
+
+ /* Enable PLL */
+ I915_WRITE(DPLL(pipe), crtc->config.dpll_hw_state.dpll);
+
+ /* Check PLL is locked */
+ if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
+ DRM_ERROR("PLL %d failed to lock\n", pipe);
+
+ /* not sure when this should be written */
+ I915_WRITE(DPLL_MD(pipe), crtc->config.dpll_hw_state.dpll_md);
+ POSTING_READ(DPLL_MD(pipe));
+
+ mutex_unlock(&dev_priv->dpio_lock);
+}
+
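Aside (not part of the patch): the ordering in chv_enable_pll() matters --
the 10-bit dclkp clock is ungated first, the spec's >100ns gap is honoured
before DPLL_VCO_ENABLE is written, and the lock bit is then polled with a
1ms budget. udelay(1) is a very comfortable upper bound for the gap; a
hypothetical tighter helper could look like:

	/* Hypothetical: satisfy the >100ns dclkp-to-PLL-enable gap. */
	static inline void chv_dclkp_settle(void)
	{
		ndelay(250);	/* spec asks for >100ns; keep some margin */
	}
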
static void i9xx_enable_pll(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
@@ -1519,45 +1689,92 @@
val = DPLL_INTEGRATED_CRI_CLK_VLV | DPLL_REFA_CLK_ENABLE_VLV;
I915_WRITE(DPLL(pipe), val);
POSTING_READ(DPLL(pipe));
+
+}
+
+static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
+{
+ enum dpio_channel port = vlv_pipe_to_channel(pipe);
+ u32 val;
+
+ /* Make sure the pipe isn't still relying on us */
+ assert_pipe_disabled(dev_priv, pipe);
+
+ /* Set PLL en = 0 */
+ val = DPLL_SSC_REF_CLOCK_CHV;
+ if (pipe != PIPE_A)
+ val |= DPLL_INTEGRATED_CRI_CLK_VLV;
+ I915_WRITE(DPLL(pipe), val);
+ POSTING_READ(DPLL(pipe));
+
+ mutex_lock(&dev_priv->dpio_lock);
+
+ /* Disable 10bit clock to display controller */
+ val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
+ val &= ~DPIO_DCLKP_EN;
+ vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);
+
+ mutex_unlock(&dev_priv->dpio_lock);
}
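Aside (not part of the patch): chv_disable_pll() is the mirror of the enable
path -- the PLL is switched off first (keeping the CRI clock alive for
non-A pipes), and only then is the 10-bit dclkp clock gated under dpio_lock.
A sketch of the invariant at the gating point:

	/* Illustrative: the VCO must already be off before dclkp is gated. */
	WARN_ON(I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE);
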
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
struct intel_digital_port *dport)
{
u32 port_mask;
+ int dpll_reg;
switch (dport->port) {
case PORT_B:
port_mask = DPLL_PORTB_READY_MASK;
+ dpll_reg = DPLL(0);
break;
case PORT_C:
port_mask = DPLL_PORTC_READY_MASK;
+ dpll_reg = DPLL(0);
+ break;
+ case PORT_D:
+ port_mask = DPLL_PORTD_READY_MASK;
+ dpll_reg = DPIO_PHY_STATUS;
break;
default:
BUG();
}
- if (wait_for((I915_READ(DPLL(0)) & port_mask) == 0, 1000))
+ if (wait_for((I915_READ(dpll_reg) & port_mask) == 0, 1000))
WARN(1, "timed out waiting for port %c ready: 0x%08x\n",
- port_name(dport->port), I915_READ(DPLL(0)));
+ port_name(dport->port), I915_READ(dpll_reg));
+}
+
+static void intel_prepare_shared_dpll(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
+
+ WARN_ON(!pll->refcount);
+ if (pll->active == 0) {
+ DRM_DEBUG_DRIVER("setting up %s\n", pll->name);
+ WARN_ON(pll->on);
+ assert_shared_dpll_disabled(dev_priv, pll);
+
+ pll->mode_set(dev_priv, pll);
+ }
}
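Aside (not part of the patch): with this split the shared DPLL lifecycle
becomes get -> prepare -> enable, where only the first active user pays the
mode_set cost. Condensed flow for one CRTC, using the functions in this file:

	pll = intel_get_shared_dpll(intel_crtc); /* refcount++, snapshot hw_state */
	intel_prepare_shared_dpll(intel_crtc);   /* mode_set iff active == 0 */
	intel_enable_shared_dpll(intel_crtc);    /* active++, power on once */
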
/**
- * ironlake_enable_shared_dpll - enable PCH PLL
+ * intel_enable_shared_dpll - enable PCH PLL
* @dev_priv: i915 private structure
* @pipe: pipe PLL to enable
*
* The PCH PLL needs to be enabled before the PCH transcoder, since it
* drives the transcoder clock.
*/
-static void ironlake_enable_shared_dpll(struct intel_crtc *crtc)
+static void intel_enable_shared_dpll(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
- /* PCH PLLs only available on ILK, SNB and IVB */
- BUG_ON(INTEL_INFO(dev)->gen < 5);
if (WARN_ON(pll == NULL))
return;
@@ -1891,7 +2108,6 @@
I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
intel_flush_primary_plane(dev_priv, plane);
- intel_wait_for_vblank(dev_priv->dev, pipe);
}
/**
@@ -1921,7 +2137,6 @@
I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
intel_flush_primary_plane(dev_priv, plane);
- intel_wait_for_vblank(dev_priv->dev, pipe);
}
static bool need_vtd_wa(struct drm_device *dev)
@@ -1944,7 +2159,7 @@
int
intel_pin_and_fence_fb_obj(struct drm_device *dev,
struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *pipelined)
+ struct intel_engine_cs *pipelined)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 alignment;
@@ -2124,7 +2339,7 @@
* Failed to alloc the obj, check to see if we should share
* an fb with another CRTC instead
*/
- list_for_each_entry(c, &dev->mode_config.crtc_list, head) {
+ for_each_crtc(dev, c) {
i = to_intel_crtc(c);
if (c == &intel_crtc->base)
@@ -2142,9 +2357,9 @@
}
}
-static int i9xx_update_primary_plane(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- int x, int y)
+static void i9xx_update_primary_plane(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int x, int y)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2230,13 +2445,11 @@
} else
I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset);
POSTING_READ(reg);
-
- return 0;
}
-static int ironlake_update_primary_plane(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- int x, int y)
+static void ironlake_update_primary_plane(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int x, int y)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2314,8 +2527,6 @@
I915_WRITE(DSPLINOFF(plane), linear_offset);
}
POSTING_READ(reg);
-
- return 0;
}
/* Assume fb object is pinned & idle & fenced and just update base pointers */
@@ -2330,7 +2541,9 @@
dev_priv->display.disable_fbc(dev);
intel_increase_pllclock(crtc);
- return dev_priv->display.update_primary_plane(crtc, fb, x, y);
+ dev_priv->display.update_primary_plane(crtc, fb, x, y);
+
+ return 0;
}
void intel_display_handle_reset(struct drm_device *dev)
@@ -2352,7 +2565,7 @@
* pending_flip_queue really got woken up.
*/
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ for_each_crtc(dev, crtc) {
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum plane plane = intel_crtc->plane;
@@ -2360,7 +2573,7 @@
intel_finish_page_flip_plane(dev, plane);
}
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ for_each_crtc(dev, crtc) {
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
mutex_lock(&crtc->mutex);
@@ -2489,14 +2702,7 @@
intel_crtc->config.pipe_src_h = adjusted_mode->crtc_vdisplay;
}
- ret = dev_priv->display.update_primary_plane(crtc, fb, x, y);
- if (ret) {
- mutex_lock(&dev->struct_mutex);
- intel_unpin_fb_obj(to_intel_framebuffer(fb)->obj);
- mutex_unlock(&dev->struct_mutex);
- DRM_ERROR("failed to update base address\n");
- return ret;
- }
+ dev_priv->display.update_primary_plane(crtc, fb, x, y);
old_fb = crtc->primary->fb;
crtc->primary->fb = fb;
@@ -3033,9 +3239,8 @@
udelay(100);
/* Ironlake workaround, disable clock pointer after downing FDI */
- if (HAS_PCH_IBX(dev)) {
+ if (HAS_PCH_IBX(dev))
I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
- }
/* still set train pattern 1 */
reg = FDI_TX_CTL(pipe);
@@ -3073,7 +3278,7 @@
* cannot claim and pin a new fb without at least acquring the
* struct_mutex and so serialising with us.
*/
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
+ for_each_intel_crtc(dev, crtc) {
if (atomic_read(&crtc->unpin_work_count) == 0)
continue;
@@ -3086,7 +3291,7 @@
return false;
}
-static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
+void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3096,8 +3301,9 @@
WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));
- wait_event(dev_priv->pending_flip_queue,
- !intel_crtc_has_pending_flip(crtc));
+ WARN_ON(wait_event_timeout(dev_priv->pending_flip_queue,
+ !intel_crtc_has_pending_flip(crtc),
+ 60*HZ) == 0);
mutex_lock(&dev->struct_mutex);
intel_finish_fb(crtc->primary->fb);
@@ -3310,7 +3516,7 @@
* Note that enable_shared_dpll tries to do the right thing, but
* get_shared_dpll unconditionally resets the pll - we need that to have
* the right LVDS enable sequence. */
- ironlake_enable_shared_dpll(intel_crtc);
+ intel_enable_shared_dpll(intel_crtc);
/* set transcoder timing, panel must allow it */
assert_panel_unlocked(dev_priv, pipe);
@@ -3414,6 +3620,8 @@
DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
crtc->base.base.id, pll->name);
+ WARN_ON(pll->refcount);
+
goto found;
}
@@ -3447,20 +3655,13 @@
return NULL;
found:
+ if (pll->refcount == 0)
+ pll->hw_state = crtc->config.dpll_hw_state;
+
crtc->config.shared_dpll = i;
DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name,
pipe_name(crtc->pipe));
- if (pll->active == 0) {
- memcpy(&pll->hw_state, &crtc->config.dpll_hw_state,
- sizeof(pll->hw_state));
-
- DRM_DEBUG_DRIVER("setting up %s\n", pll->name);
- WARN_ON(pll->on);
- assert_shared_dpll_disabled(dev_priv, pll);
-
- pll->mode_set(dev_priv, pll);
- }
pll->refcount++;
return pll;
@@ -3531,17 +3732,17 @@
void hsw_enable_ips(struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
if (!crtc->config.ips_enabled)
return;
- /* We can only enable IPS after we enable a plane and wait for a vblank.
- * We guarantee that the plane is enabled by calling intel_enable_ips
- * only after intel_enable_plane. And intel_enable_plane already waits
- * for a vblank, so all we need to do here is to enable the IPS bit. */
+ /* We can only enable IPS after we enable a plane and wait for a vblank */
+ intel_wait_for_vblank(dev, crtc->pipe);
+
assert_plane_enabled(dev_priv, crtc->plane);
- if (IS_BROADWELL(crtc->base.dev)) {
+ if (IS_BROADWELL(dev)) {
mutex_lock(&dev_priv->rps.hw_lock);
WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000));
mutex_unlock(&dev_priv->rps.hw_lock);
@@ -3634,7 +3835,49 @@
hsw_enable_ips(intel_crtc);
}
-static void ilk_crtc_enable_planes(struct drm_crtc *crtc)
+static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
+{
+ if (!enable && intel_crtc->overlay) {
+ struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ mutex_lock(&dev->struct_mutex);
+ dev_priv->mm.interruptible = false;
+ (void) intel_overlay_switch_off(intel_crtc->overlay);
+ dev_priv->mm.interruptible = true;
+ mutex_unlock(&dev->struct_mutex);
+ }
+
+ /* Let userspace switch the overlay on again. In most cases userspace
+ * has to recompute where to put it anyway.
+ */
+}
+
+/**
+ * g4x_fixup_plane - ugly workaround for G45 to fire up the hardware
+ * cursor plane briefly if not already running after enabling the display
+ * plane.
+ * This workaround avoids occasional blank screens when self refresh is
+ * enabled.
+ */
+static void
+g4x_fixup_plane(struct drm_i915_private *dev_priv, enum pipe pipe)
+{
+ u32 cntl = I915_READ(CURCNTR(pipe));
+
+ if ((cntl & CURSOR_MODE) == 0) {
+ u32 fw_bcl_self = I915_READ(FW_BLC_SELF);
+
+ I915_WRITE(FW_BLC_SELF, fw_bcl_self & ~FW_BLC_SELF_EN);
+ I915_WRITE(CURCNTR(pipe), CURSOR_MODE_64_ARGB_AX);
+ intel_wait_for_vblank(dev_priv->dev, pipe);
+ I915_WRITE(CURCNTR(pipe), cntl);
+ I915_WRITE(CURBASE(pipe), I915_READ(CURBASE(pipe)));
+ I915_WRITE(FW_BLC_SELF, fw_bcl_self);
+ }
+}
+
+static void intel_crtc_enable_planes(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3644,16 +3887,21 @@
intel_enable_primary_hw_plane(dev_priv, plane, pipe);
intel_enable_planes(crtc);
+ /* The fixup needs to happen before cursor is enabled */
+ if (IS_G4X(dev))
+ g4x_fixup_plane(dev_priv, pipe);
intel_crtc_update_cursor(crtc, true);
+ intel_crtc_dpms_overlay(intel_crtc, true);
hsw_enable_ips(intel_crtc);
mutex_lock(&dev->struct_mutex);
intel_update_fbc(dev);
+ intel_edp_psr_update(dev);
mutex_unlock(&dev->struct_mutex);
}
-static void ilk_crtc_disable_planes(struct drm_crtc *crtc)
+static void intel_crtc_disable_planes(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3662,13 +3910,14 @@
int plane = intel_crtc->plane;
intel_crtc_wait_for_pending_flips(crtc);
- drm_vblank_off(dev, pipe);
+ drm_crtc_vblank_off(crtc);
if (dev_priv->fbc.plane == plane)
intel_disable_fbc(dev);
hsw_disable_ips(intel_crtc);
+ intel_crtc_dpms_overlay(intel_crtc, false);
intel_crtc_update_cursor(crtc, false);
intel_disable_planes(crtc);
intel_disable_primary_hw_plane(dev_priv, plane, pipe);
@@ -3681,12 +3930,35 @@
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
+ enum plane plane = intel_crtc->plane;
WARN_ON(!crtc->enabled);
if (intel_crtc->active)
return;
+ if (intel_crtc->config.has_pch_encoder)
+ intel_prepare_shared_dpll(intel_crtc);
+
+ if (intel_crtc->config.has_dp_encoder)
+ intel_dp_set_m_n(intel_crtc);
+
+ intel_set_pipe_timings(intel_crtc);
+
+ if (intel_crtc->config.has_pch_encoder) {
+ intel_cpu_transcoder_set_m_n(intel_crtc,
+ &intel_crtc->config.fdi_m_n);
+ }
+
+ ironlake_set_pipeconf(crtc);
+
+ /* Set up the display plane register */
+ I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE);
+ POSTING_READ(DSPCNTR(plane));
+
+ dev_priv->display.update_primary_plane(crtc, crtc->primary->fb,
+ crtc->x, crtc->y);
+
intel_crtc->active = true;
intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
@@ -3726,17 +3998,9 @@
if (HAS_PCH_CPT(dev))
cpt_verify_modeset(dev, intel_crtc->pipe);
- ilk_crtc_enable_planes(crtc);
+ intel_crtc_enable_planes(crtc);
- /*
- * There seems to be a race in PCH platform hw (at least on some
- * outputs) where an enabled pipe still completes any pageflip right
- * away (as if the pipe is off) instead of waiting for vblank. As soon
- * as the first vblank happend, everything works as expected. Hence just
- * wait for one vblank before returning to avoid strange things
- * happening.
- */
- intel_wait_for_vblank(dev, intel_crtc->pipe);
+ drm_crtc_vblank_on(crtc);
}
/* IPS only exists on ULT machines and is tied to pipe A. */
@@ -3758,7 +4022,7 @@
/* We want to get the other_active_crtc only if there's only 1 other
* active crtc. */
- list_for_each_entry(crtc_it, &dev->mode_config.crtc_list, base.head) {
+ for_each_intel_crtc(dev, crtc_it) {
if (!crtc_it->active || crtc_it == crtc)
continue;
@@ -3781,12 +4045,34 @@
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
+ enum plane plane = intel_crtc->plane;
WARN_ON(!crtc->enabled);
if (intel_crtc->active)
return;
+ if (intel_crtc->config.has_dp_encoder)
+ intel_dp_set_m_n(intel_crtc);
+
+ intel_set_pipe_timings(intel_crtc);
+
+ if (intel_crtc->config.has_pch_encoder) {
+ intel_cpu_transcoder_set_m_n(intel_crtc,
+ &intel_crtc->config.fdi_m_n);
+ }
+
+ haswell_set_pipeconf(crtc);
+
+ intel_set_pipe_csc(crtc);
+
+ /* Set up the display plane register */
+ I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE | DISPPLANE_PIPE_CSC_ENABLE);
+ POSTING_READ(DSPCNTR(plane));
+
+ dev_priv->display.update_primary_plane(crtc, crtc->primary->fb,
+ crtc->x, crtc->y);
+
intel_crtc->active = true;
intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
@@ -3827,7 +4113,9 @@
/* If we change the relative order between pipe/planes enabling, we need
* to change the workaround. */
haswell_mode_set_planes_workaround(intel_crtc);
- ilk_crtc_enable_planes(crtc);
+ intel_crtc_enable_planes(crtc);
+
+ drm_crtc_vblank_on(crtc);
}
static void ironlake_pfit_disable(struct intel_crtc *crtc)
@@ -3857,7 +4145,7 @@
if (!intel_crtc->active)
return;
- ilk_crtc_disable_planes(crtc);
+ intel_crtc_disable_planes(crtc);
for_each_encoder_on_crtc(dev, crtc, encoder)
encoder->disable(encoder);
@@ -3905,6 +4193,7 @@
mutex_lock(&dev->struct_mutex);
intel_update_fbc(dev);
+ intel_edp_psr_update(dev);
mutex_unlock(&dev->struct_mutex);
}
@@ -3920,7 +4209,7 @@
if (!intel_crtc->active)
return;
- ilk_crtc_disable_planes(crtc);
+ intel_crtc_disable_planes(crtc);
for_each_encoder_on_crtc(dev, crtc, encoder) {
intel_opregion_notify_encoder(encoder, false);
@@ -3952,6 +4241,7 @@
mutex_lock(&dev->struct_mutex);
intel_update_fbc(dev);
+ intel_edp_psr_update(dev);
mutex_unlock(&dev->struct_mutex);
}
@@ -3966,48 +4256,6 @@
intel_ddi_put_crtc_pll(crtc);
}
-static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
-{
- if (!enable && intel_crtc->overlay) {
- struct drm_device *dev = intel_crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- mutex_lock(&dev->struct_mutex);
- dev_priv->mm.interruptible = false;
- (void) intel_overlay_switch_off(intel_crtc->overlay);
- dev_priv->mm.interruptible = true;
- mutex_unlock(&dev->struct_mutex);
- }
-
- /* Let userspace switch the overlay on again. In most cases userspace
- * has to recompute where to put it anyway.
- */
-}
-
-/**
- * i9xx_fixup_plane - ugly workaround for G45 to fire up the hardware
- * cursor plane briefly if not already running after enabling the display
- * plane.
- * This workaround avoids occasional blank screens when self refresh is
- * enabled.
- */
-static void
-g4x_fixup_plane(struct drm_i915_private *dev_priv, enum pipe pipe)
-{
- u32 cntl = I915_READ(CURCNTR(pipe));
-
- if ((cntl & CURSOR_MODE) == 0) {
- u32 fw_bcl_self = I915_READ(FW_BLC_SELF);
-
- I915_WRITE(FW_BLC_SELF, fw_bcl_self & ~FW_BLC_SELF_EN);
- I915_WRITE(CURCNTR(pipe), CURSOR_MODE_64_ARGB_AX);
- intel_wait_for_vblank(dev_priv->dev, pipe);
- I915_WRITE(CURCNTR(pipe), cntl);
- I915_WRITE(CURBASE(pipe), I915_READ(CURBASE(pipe)));
- I915_WRITE(FW_BLC_SELF, fw_bcl_self);
- }
-}
-
static void i9xx_pfit_enable(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
@@ -4119,7 +4367,7 @@
* First get all needed power domains, then put all unneeded, to avoid
* any unnecessary toggling of the power wells.
*/
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
+ for_each_intel_crtc(dev, crtc) {
enum intel_display_power_domain domain;
if (!crtc->base.enabled)
@@ -4131,7 +4379,7 @@
intel_display_power_get(dev_priv, domain);
}
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
+ for_each_intel_crtc(dev, crtc) {
enum intel_display_power_domain domain;
for_each_power_domain(domain, crtc->enabled_power_domains)
@@ -4265,8 +4513,7 @@
struct intel_crtc *intel_crtc;
int max_pixclk = 0;
- list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
- base.head) {
+ for_each_intel_crtc(dev, intel_crtc) {
if (intel_crtc->new_enabled)
max_pixclk = max(max_pixclk,
intel_crtc->new_config->adjusted_mode.crtc_clock);
@@ -4287,8 +4534,7 @@
return;
/* disable/enable all currently active pipes while we change cdclk */
- list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
- base.head)
+ for_each_intel_crtc(dev, intel_crtc)
if (intel_crtc->base.enabled)
*prepare_pipes |= (1 << intel_crtc->pipe);
}
@@ -4313,22 +4559,55 @@
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
bool is_dsi;
+ u32 dspcntr;
WARN_ON(!crtc->enabled);
if (intel_crtc->active)
return;
+ vlv_prepare_pll(intel_crtc);
+
+ /* Set up the display plane register */
+ dspcntr = DISPPLANE_GAMMA_ENABLE;
+
+ if (intel_crtc->config.has_dp_encoder)
+ intel_dp_set_m_n(intel_crtc);
+
+ intel_set_pipe_timings(intel_crtc);
+
+ /* pipesrc and dspsize control the size that is scaled from,
+ * which should always be the user's requested size.
+ */
+ I915_WRITE(DSPSIZE(plane),
+ ((intel_crtc->config.pipe_src_h - 1) << 16) |
+ (intel_crtc->config.pipe_src_w - 1));
+ I915_WRITE(DSPPOS(plane), 0);
+
+ i9xx_set_pipeconf(intel_crtc);
+
+ I915_WRITE(DSPCNTR(plane), dspcntr);
+ POSTING_READ(DSPCNTR(plane));
+
+ dev_priv->display.update_primary_plane(crtc, crtc->primary->fb,
+ crtc->x, crtc->y);
+
intel_crtc->active = true;
+ intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
+
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->pre_pll_enable)
encoder->pre_pll_enable(encoder);
is_dsi = intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI);
- if (!is_dsi)
- vlv_enable_pll(intel_crtc);
+ if (!is_dsi) {
+ if (IS_CHERRYVIEW(dev))
+ chv_enable_pll(intel_crtc);
+ else
+ vlv_enable_pll(intel_crtc);
+ }
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->pre_enable)
@@ -4340,17 +4619,25 @@
intel_update_watermarks(crtc);
intel_enable_pipe(intel_crtc);
- intel_wait_for_vblank(dev_priv->dev, pipe);
- intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
-
- intel_enable_primary_hw_plane(dev_priv, plane, pipe);
- intel_enable_planes(crtc);
- intel_crtc_update_cursor(crtc, true);
-
- intel_update_fbc(dev);
for_each_encoder_on_crtc(dev, crtc, encoder)
encoder->enable(encoder);
+
+ intel_crtc_enable_planes(crtc);
+
+ drm_crtc_vblank_on(crtc);
+
+ /* Underruns don't raise interrupts, so check manually. */
+ i9xx_check_fifo_underruns(dev);
+}
+
+static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(FP0(crtc->pipe), crtc->config.dpll_hw_state.fp0);
+ I915_WRITE(FP1(crtc->pipe), crtc->config.dpll_hw_state.fp1);
}
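Aside (not part of the patch): the FP0/FP1 writes now live here, at crtc
enable time, instead of in the compute stage -- the divider hunk further
down (around the i9xx_dpll_compute_fp() calls) only stages fp0/fp1 into
dpll_hw_state. A hypothetical sanity check at this point:

	/* Illustrative: dividers must have been staged before enable. */
	WARN_ON(crtc->config.dpll_hw_state.fp0 == 0);
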
static void i9xx_crtc_enable(struct drm_crtc *crtc)
@@ -4361,14 +4648,49 @@
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
+ u32 dspcntr;
WARN_ON(!crtc->enabled);
if (intel_crtc->active)
return;
+ i9xx_set_pll_dividers(intel_crtc);
+
+ /* Set up the display plane register */
+ dspcntr = DISPPLANE_GAMMA_ENABLE;
+
+ if (pipe == 0)
+ dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
+ else
+ dspcntr |= DISPPLANE_SEL_PIPE_B;
+
+ if (intel_crtc->config.has_dp_encoder)
+ intel_dp_set_m_n(intel_crtc);
+
+ intel_set_pipe_timings(intel_crtc);
+
+ /* pipesrc and dspsize control the size that is scaled from,
+ * which should always be the user's requested size.
+ */
+ I915_WRITE(DSPSIZE(plane),
+ ((intel_crtc->config.pipe_src_h - 1) << 16) |
+ (intel_crtc->config.pipe_src_w - 1));
+ I915_WRITE(DSPPOS(plane), 0);
+
+ i9xx_set_pipeconf(intel_crtc);
+
+ I915_WRITE(DSPCNTR(plane), dspcntr);
+ POSTING_READ(DSPCNTR(plane));
+
+ dev_priv->display.update_primary_plane(crtc, crtc->primary->fb,
+ crtc->x, crtc->y);
+
intel_crtc->active = true;
+ if (!IS_GEN2(dev))
+ intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
+
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->pre_enable)
encoder->pre_enable(encoder);
@@ -4381,23 +4703,26 @@
intel_update_watermarks(crtc);
intel_enable_pipe(intel_crtc);
- intel_wait_for_vblank(dev_priv->dev, pipe);
- intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
-
- intel_enable_primary_hw_plane(dev_priv, plane, pipe);
- intel_enable_planes(crtc);
- /* The fixup needs to happen before cursor is enabled */
- if (IS_G4X(dev))
- g4x_fixup_plane(dev_priv, pipe);
- intel_crtc_update_cursor(crtc, true);
-
- /* Give the overlay scaler a chance to enable if it's on this pipe */
- intel_crtc_dpms_overlay(intel_crtc, true);
-
- intel_update_fbc(dev);
for_each_encoder_on_crtc(dev, crtc, encoder)
encoder->enable(encoder);
+
+ intel_crtc_enable_planes(crtc);
+
+ /*
+ * Gen2 reports pipe underruns whenever all planes are disabled.
+ * So don't enable underrun reporting before at least some planes
+ * are enabled.
+ * FIXME: Need to fix the logic to work when we turn off all planes
+ * but leave the pipe running.
+ */
+ if (IS_GEN2(dev))
+ intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
+
+ drm_crtc_vblank_on(crtc);
+
+ /* Underruns don't raise interrupts, so check manually. */
+ i9xx_check_fifo_underruns(dev);
}
static void i9xx_pfit_disable(struct intel_crtc *crtc)
@@ -4422,27 +4747,31 @@
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
- int plane = intel_crtc->plane;
if (!intel_crtc->active)
return;
+ /*
+ * Gen2 reports pipe underruns whenever all planes are disabled.
+ * So disable underrun reporting before all the planes get disabled.
+ * FIXME: Need to fix the logic to work when we turn off all planes
+ * but leave the pipe running.
+ */
+ if (IS_GEN2(dev))
+ intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);
+
+ intel_crtc_disable_planes(crtc);
+
for_each_encoder_on_crtc(dev, crtc, encoder)
encoder->disable(encoder);
- /* Give the overlay scaler a chance to disable if it's on this pipe */
- intel_crtc_wait_for_pending_flips(crtc);
- drm_vblank_off(dev, pipe);
+ /*
+ * On gen2 planes are double buffered but the pipe isn't, so we must
+ * wait for planes to fully turn off before disabling the pipe.
+ */
+ if (IS_GEN2(dev))
+ intel_wait_for_vblank(dev, pipe);
- if (dev_priv->fbc.plane == plane)
- intel_disable_fbc(dev);
-
- intel_crtc_dpms_overlay(intel_crtc, false);
- intel_crtc_update_cursor(crtc, false);
- intel_disable_planes(crtc);
- intel_disable_primary_hw_plane(dev_priv, plane, pipe);
-
- intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);
intel_disable_pipe(dev_priv, pipe);
i9xx_pfit_disable(intel_crtc);
@@ -4451,15 +4780,25 @@
if (encoder->post_disable)
encoder->post_disable(encoder);
- if (IS_VALLEYVIEW(dev) && !intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
- vlv_disable_pll(dev_priv, pipe);
- else if (!IS_VALLEYVIEW(dev))
- i9xx_disable_pll(dev_priv, pipe);
+ if (!intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI)) {
+ if (IS_CHERRYVIEW(dev))
+ chv_disable_pll(dev_priv, pipe);
+ else if (IS_VALLEYVIEW(dev))
+ vlv_disable_pll(dev_priv, pipe);
+ else
+ i9xx_disable_pll(dev_priv, pipe);
+ }
+
+ if (!IS_GEN2(dev))
+ intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);
intel_crtc->active = false;
intel_update_watermarks(crtc);
+ mutex_lock(&dev->struct_mutex);
intel_update_fbc(dev);
+ intel_edp_psr_update(dev);
+ mutex_unlock(&dev->struct_mutex);
}
static void i9xx_crtc_off(struct drm_crtc *crtc)
@@ -4522,13 +4861,11 @@
struct drm_device *dev = crtc->dev;
struct drm_connector *connector;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
/* crtc should still be enabled when we disable it. */
WARN_ON(!crtc->enabled);
dev_priv->display.crtc_disable(crtc);
- intel_crtc->eld_vld = false;
intel_crtc_update_sarea(crtc, false);
dev_priv->display.off(crtc);
@@ -4592,7 +4929,7 @@
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.base.id,
- drm_get_connector_name(&connector->base));
+ connector->base.name);
WARN(connector->base.dpms == DRM_MODE_DPMS_OFF,
"wrong connector dpms state\n");
@@ -4996,8 +5333,6 @@
intel_clock_t *reduced_clock)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- int pipe = crtc->pipe;
u32 fp, fp2 = 0;
if (IS_PINEVIEW(dev)) {
@@ -5010,17 +5345,14 @@
fp2 = i9xx_dpll_compute_fp(reduced_clock);
}
- I915_WRITE(FP0(pipe), fp);
crtc->config.dpll_hw_state.fp0 = fp;
crtc->lowfreq_avail = false;
if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
reduced_clock && i915.powersave) {
- I915_WRITE(FP1(pipe), fp2);
crtc->config.dpll_hw_state.fp1 = fp2;
crtc->lowfreq_avail = true;
} else {
- I915_WRITE(FP1(pipe), fp);
crtc->config.dpll_hw_state.fp1 = fp;
}
}
@@ -5098,12 +5430,34 @@
static void vlv_update_pll(struct intel_crtc *crtc)
{
+ u32 dpll, dpll_md;
+
+ /*
+ * Enable DPIO clock input. We should never disable the reference
+ * clock for pipe B, since VGA hotplug / manual detection depends
+ * on it.
+ */
+ dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
+ DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV;
+ /* We should never disable this, set it here for state tracking */
+ if (crtc->pipe == PIPE_B)
+ dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
+ dpll |= DPLL_VCO_ENABLE;
+ crtc->config.dpll_hw_state.dpll = dpll;
+
+ dpll_md = (crtc->config.pixel_multiplier - 1)
+ << DPLL_MD_UDI_MULTIPLIER_SHIFT;
+ crtc->config.dpll_hw_state.dpll_md = dpll_md;
+}
+
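Aside (not part of the patch): vlv_update_pll() is now a pure compute step;
all DPIO register traffic moved into vlv_prepare_pll(), which the enable
path calls (see valleyview_crtc_enable() above). Resulting order on VLV:

	/* Illustrative call order, simplified: */
	vlv_update_pll(intel_crtc);	/* .crtc_mode_set: stage dpll_hw_state */
	vlv_prepare_pll(intel_crtc);	/* .crtc_enable: write DPIO dividers */
	vlv_enable_pll(intel_crtc);	/* .crtc_enable: set DPLL_VCO_ENABLE */
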
+static void vlv_prepare_pll(struct intel_crtc *crtc)
+{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe = crtc->pipe;
- u32 dpll, mdiv;
+ u32 mdiv;
u32 bestn, bestm1, bestm2, bestp1, bestp2;
- u32 coreclk, reg_val, dpll_md;
+ u32 coreclk, reg_val;
mutex_lock(&dev_priv->dpio_lock);
@@ -5116,7 +5470,7 @@
/* See eDP HDMI DPIO driver vbios notes doc */
/* PLL B needs special handling */
- if (pipe)
+ if (pipe == PIPE_B)
vlv_pllb_recal_opamp(dev_priv, pipe);
/* Set up Tx target for periodic Rcomp update */
@@ -5160,7 +5514,7 @@
if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP) ||
intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT)) {
/* Use SSC source */
- if (!pipe)
+ if (pipe == PIPE_A)
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
0x0df40000);
else
@@ -5168,7 +5522,7 @@
0x0df70000);
} else { /* HDMI or VGA */
/* Use bend source */
- if (!pipe)
+ if (pipe == PIPE_A)
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
0x0df70000);
else
@@ -5184,23 +5538,84 @@
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
+ mutex_unlock(&dev_priv->dpio_lock);
+}
+
+static void chv_update_pll(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe = crtc->pipe;
+ int dpll_reg = DPLL(crtc->pipe);
+ enum dpio_channel port = vlv_pipe_to_channel(pipe);
+ u32 loopfilter, intcoeff;
+ u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
+ int refclk;
+
+ crtc->config.dpll_hw_state.dpll = DPLL_SSC_REF_CLOCK_CHV |
+ DPLL_REFA_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS |
+ DPLL_VCO_ENABLE;
+ if (pipe != PIPE_A)
+ crtc->config.dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
+
+ crtc->config.dpll_hw_state.dpll_md =
+ (crtc->config.pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
+
+ bestn = crtc->config.dpll.n;
+ bestm2_frac = crtc->config.dpll.m2 & 0x3fffff;
+ bestm1 = crtc->config.dpll.m1;
+ bestm2 = crtc->config.dpll.m2 >> 22;
+ bestp1 = crtc->config.dpll.p1;
+ bestp2 = crtc->config.dpll.p2;
/*
- * Enable DPIO clock input. We should never disable the reference
- * clock for pipe B, since VGA hotplug / manual detection depends
- * on it.
+ * Enable Refclk and SSC
*/
- dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
- DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV;
- /* We should never disable this, set it here for state tracking */
- if (pipe == PIPE_B)
- dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
- dpll |= DPLL_VCO_ENABLE;
- crtc->config.dpll_hw_state.dpll = dpll;
+ I915_WRITE(dpll_reg,
+ crtc->config.dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
- dpll_md = (crtc->config.pixel_multiplier - 1)
- << DPLL_MD_UDI_MULTIPLIER_SHIFT;
- crtc->config.dpll_hw_state.dpll_md = dpll_md;
+ mutex_lock(&dev_priv->dpio_lock);
+
+ /* p1 and p2 divider */
+ vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
+ 5 << DPIO_CHV_S1_DIV_SHIFT |
+ bestp1 << DPIO_CHV_P1_DIV_SHIFT |
+ bestp2 << DPIO_CHV_P2_DIV_SHIFT |
+ 1 << DPIO_CHV_K_DIV_SHIFT);
+
+ /* Feedback post-divider - m2 */
+ vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);
+
+ /* Feedback refclk divider - n and m1 */
+ vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
+ DPIO_CHV_M1_DIV_BY_2 |
+ 1 << DPIO_CHV_N_DIV_SHIFT);
+
+ /* M2 fraction division */
+ vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);
+
+ /* M2 fraction division enable */
+ vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port),
+ DPIO_CHV_FRAC_DIV_EN |
+ (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT));
+
+ /* Loop filter */
+ refclk = i9xx_get_refclk(&crtc->base, 0);
+ loopfilter = 5 << DPIO_CHV_PROP_COEFF_SHIFT |
+ 2 << DPIO_CHV_GAIN_CTRL_SHIFT;
+ if (refclk == 100000)
+ intcoeff = 11;
+ else if (refclk == 38400)
+ intcoeff = 10;
+ else
+ intcoeff = 9;
+ loopfilter |= intcoeff << DPIO_CHV_INT_COEFF_SHIFT;
+ vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);
+
+ /* AFC Recal */
+ vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
+ vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
+ DPIO_AFC_RECAL);
mutex_unlock(&dev_priv->dpio_lock);
}
@@ -5518,16 +5933,12 @@
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
- int plane = intel_crtc->plane;
int refclk, num_connectors = 0;
intel_clock_t clock, reduced_clock;
- u32 dspcntr;
bool ok, has_reduced_clock = false;
bool is_lvds = false, is_dsi = false;
struct intel_encoder *encoder;
const intel_limit_t *limit;
- int ret;
for_each_encoder_on_crtc(dev, crtc, encoder) {
switch (encoder->type) {
@@ -5543,7 +5954,7 @@
}
if (is_dsi)
- goto skip_dpll;
+ return 0;
if (!intel_crtc->config.clock_set) {
refclk = i9xx_get_refclk(crtc, num_connectors);
@@ -5588,46 +5999,17 @@
i8xx_update_pll(intel_crtc,
has_reduced_clock ? &reduced_clock : NULL,
num_connectors);
+ } else if (IS_CHERRYVIEW(dev)) {
+ chv_update_pll(intel_crtc);
} else if (IS_VALLEYVIEW(dev)) {
vlv_update_pll(intel_crtc);
} else {
i9xx_update_pll(intel_crtc,
has_reduced_clock ? &reduced_clock : NULL,
- num_connectors);
+ num_connectors);
}
-skip_dpll:
- /* Set up the display plane register */
- dspcntr = DISPPLANE_GAMMA_ENABLE;
-
- if (!IS_VALLEYVIEW(dev)) {
- if (pipe == 0)
- dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
- else
- dspcntr |= DISPPLANE_SEL_PIPE_B;
- }
-
- if (intel_crtc->config.has_dp_encoder)
- intel_dp_set_m_n(intel_crtc);
-
- intel_set_pipe_timings(intel_crtc);
-
- /* pipesrc and dspsize control the size that is scaled from,
- * which should always be the user's requested size.
- */
- I915_WRITE(DSPSIZE(plane),
- ((intel_crtc->config.pipe_src_h - 1) << 16) |
- (intel_crtc->config.pipe_src_w - 1));
- I915_WRITE(DSPPOS(plane), 0);
-
- i9xx_set_pipeconf(intel_crtc);
-
- I915_WRITE(DSPCNTR(plane), dspcntr);
- POSTING_READ(DSPCNTR(plane));
-
- ret = intel_pipe_set_base(crtc, x, y, fb);
-
- return ret;
+ return 0;
}
static void i9xx_get_pfit_config(struct intel_crtc *crtc,
@@ -5747,6 +6129,36 @@
}
+static void chv_crtc_clock_get(struct intel_crtc *crtc,
+ struct intel_crtc_config *pipe_config)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe = pipe_config->cpu_transcoder;
+ enum dpio_channel port = vlv_pipe_to_channel(pipe);
+ intel_clock_t clock;
+ u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2;
+ int refclk = 100000;
+
+ mutex_lock(&dev_priv->dpio_lock);
+ cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
+ pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
+ pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
+ pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
+ mutex_unlock(&dev_priv->dpio_lock);
+
+ clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
+ clock.m2 = ((pll_dw0 & 0xff) << 22) | (pll_dw2 & 0x3fffff);
+ clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
+ clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
+ clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
+
+ chv_clock(refclk, &clock);
+
+ /* clock.dot is the fast clock */
+ pipe_config->port_clock = clock.dot / 5;
+}
+
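Aside (not part of the patch): chv_crtc_clock_get() just reverses the
packing done in chv_update_pll(), then reuses chv_clock() and divides the
fast clock back down by 5. A worked example with in-range values:

	/* Illustrative: refclk = 100 MHz, n = 1, m1 = 2, m2 = 27 << 22,
	 * p1 = 4, p2 = 1:
	 *   vco        = 100000 * 2 * 27 / 1 = 5400000 kHz (within CHV limits)
	 *   clock.dot  = 5400000 / (4 * 1)   = 1350000 kHz (fast clock)
	 *   port_clock = 1350000 / 5         =  270000 kHz (HBR DP link rate)
	 */
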
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
{
@@ -5781,6 +6193,9 @@
}
}
+ if (IS_VALLEYVIEW(dev) && (tmp & PIPECONF_COLOR_RANGE_SELECT))
+ pipe_config->limited_color_range = true;
+
if (INTEL_INFO(dev)->gen < 4)
pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
@@ -5816,7 +6231,9 @@
DPLL_PORTB_READY_MASK);
}
- if (IS_VALLEYVIEW(dev))
+ if (IS_CHERRYVIEW(dev))
+ chv_crtc_clock_get(crtc, pipe_config);
+ else if (IS_VALLEYVIEW(dev))
vlv_crtc_clock_get(crtc, pipe_config);
else
i9xx_crtc_clock_get(crtc, pipe_config);
@@ -5937,8 +6354,7 @@
if (intel_panel_use_ssc(dev_priv) && can_ssc) {
DRM_DEBUG_KMS("Using SSC on eDP\n");
val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
- }
- else
+ } else
val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
} else
val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
@@ -6517,10 +6933,7 @@
struct drm_framebuffer *fb)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
- int plane = intel_crtc->plane;
int num_connectors = 0;
intel_clock_t clock, reduced_clock;
u32 dpll = 0, fp = 0, fp2 = 0;
@@ -6528,7 +6941,6 @@
bool is_lvds = false;
struct intel_encoder *encoder;
struct intel_shared_dpll *pll;
- int ret;
for_each_encoder_on_crtc(dev, crtc, encoder) {
switch (encoder->type) {
@@ -6578,36 +6990,18 @@
pll = intel_get_shared_dpll(intel_crtc);
if (pll == NULL) {
DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
- pipe_name(pipe));
+ pipe_name(intel_crtc->pipe));
return -EINVAL;
}
} else
intel_put_shared_dpll(intel_crtc);
- if (intel_crtc->config.has_dp_encoder)
- intel_dp_set_m_n(intel_crtc);
-
if (is_lvds && has_reduced_clock && i915.powersave)
intel_crtc->lowfreq_avail = true;
else
intel_crtc->lowfreq_avail = false;
- intel_set_pipe_timings(intel_crtc);
-
- if (intel_crtc->config.has_pch_encoder) {
- intel_cpu_transcoder_set_m_n(intel_crtc,
- &intel_crtc->config.fdi_m_n);
- }
-
- ironlake_set_pipeconf(crtc);
-
- /* Set up the display plane register */
- I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE);
- POSTING_READ(DSPCNTR(plane));
-
- ret = intel_pipe_set_base(crtc, x, y, fb);
-
- return ret;
+ return 0;
}
static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
@@ -6785,6 +7179,9 @@
break;
}
+ if (tmp & PIPECONF_COLOR_RANGE_SELECT)
+ pipe_config->limited_color_range = true;
+
if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
struct intel_shared_dpll *pll;
@@ -6835,7 +7232,7 @@
struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
struct intel_crtc *crtc;
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head)
+ for_each_intel_crtc(dev, crtc)
WARN(crtc->active, "CRTC for pipe %c enabled\n",
pipe_name(crtc->pipe));
@@ -7070,39 +7467,15 @@
int x, int y,
struct drm_framebuffer *fb)
{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int plane = intel_crtc->plane;
- int ret;
if (!intel_ddi_pll_select(intel_crtc))
return -EINVAL;
intel_ddi_pll_enable(intel_crtc);
- if (intel_crtc->config.has_dp_encoder)
- intel_dp_set_m_n(intel_crtc);
-
intel_crtc->lowfreq_avail = false;
- intel_set_pipe_timings(intel_crtc);
-
- if (intel_crtc->config.has_pch_encoder) {
- intel_cpu_transcoder_set_m_n(intel_crtc,
- &intel_crtc->config.fdi_m_n);
- }
-
- haswell_set_pipeconf(crtc);
-
- intel_set_pipe_csc(crtc);
-
- /* Set up the display plane register */
- I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE | DISPPLANE_PIPE_CSC_ENABLE);
- POSTING_READ(DSPCNTR(plane));
-
- ret = intel_pipe_set_base(crtc, x, y, fb);
-
- return ret;
+ return 0;
}
static bool haswell_get_pipe_config(struct intel_crtc *crtc,
@@ -7182,40 +7555,6 @@
return true;
}
-static int intel_crtc_mode_set(struct drm_crtc *crtc,
- int x, int y,
- struct drm_framebuffer *fb)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_encoder *encoder;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
- int pipe = intel_crtc->pipe;
- int ret;
-
- drm_vblank_pre_modeset(dev, pipe);
-
- ret = dev_priv->display.crtc_mode_set(crtc, x, y, fb);
-
- drm_vblank_post_modeset(dev, pipe);
-
- if (ret != 0)
- return ret;
-
- for_each_encoder_on_crtc(dev, crtc, encoder) {
- DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n",
- encoder->base.base.id,
- drm_get_encoder_name(&encoder->base),
- mode->base.id, mode->name);
-
- if (encoder->mode_set)
- encoder->mode_set(encoder);
- }
-
- return 0;
-}
-
static struct {
int clock;
u32 config;
@@ -7330,7 +7669,6 @@
{
struct drm_i915_private *dev_priv = connector->dev->dev_private;
uint8_t *eld = connector->eld;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t eldv;
uint32_t i;
int len;
@@ -7369,7 +7707,6 @@
DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));
eldv = AUDIO_ELD_VALID_A << (pipe * 4);
- intel_crtc->eld_vld = true;
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
@@ -7516,9 +7853,9 @@
DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
connector->base.id,
- drm_get_connector_name(connector),
+ connector->name,
connector->encoder->base.id,
- drm_get_encoder_name(connector->encoder));
+ connector->encoder->name);
connector->eld[6] = drm_av_sync_delay(connector, mode) / 2;
@@ -7610,7 +7947,7 @@
if (intel_crtc->cursor_visible != visible) {
int16_t width = intel_crtc->cursor_width;
- uint32_t cntl = I915_READ(CURCNTR_IVB(pipe));
+ uint32_t cntl = I915_READ(CURCNTR(pipe));
if (base) {
cntl &= ~CURSOR_MODE;
cntl |= MCURSOR_GAMMA_ENABLE;
@@ -7636,14 +7973,14 @@
cntl |= CURSOR_PIPE_CSC_ENABLE;
cntl &= ~CURSOR_TRICKLE_FEED_DISABLE;
}
- I915_WRITE(CURCNTR_IVB(pipe), cntl);
+ I915_WRITE(CURCNTR(pipe), cntl);
intel_crtc->cursor_visible = visible;
}
/* and commit changes on next vblank */
- POSTING_READ(CURCNTR_IVB(pipe));
- I915_WRITE(CURBASE_IVB(pipe), base);
- POSTING_READ(CURBASE_IVB(pipe));
+ POSTING_READ(CURCNTR(pipe));
+ I915_WRITE(CURBASE(pipe), base);
+ POSTING_READ(CURBASE(pipe));
}
/* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */
@@ -7690,16 +8027,14 @@
if (!visible && !intel_crtc->cursor_visible)
return;
- if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_BROADWELL(dev)) {
- I915_WRITE(CURPOS_IVB(pipe), pos);
+ I915_WRITE(CURPOS(pipe), pos);
+
+ if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_BROADWELL(dev))
ivb_update_cursor(crtc, base);
- } else {
- I915_WRITE(CURPOS(pipe), pos);
- if (IS_845G(dev) || IS_I865G(dev))
- i845_update_cursor(crtc, base);
- else
- i9xx_update_cursor(crtc, base);
- }
+ else if (IS_845G(dev) || IS_I865G(dev))
+ i845_update_cursor(crtc, base);
+ else
+ i9xx_update_cursor(crtc, base);
}
static int intel_crtc_cursor_set(struct drm_crtc *crtc,
@@ -7985,8 +8320,10 @@
int i = -1;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
- connector->base.id, drm_get_connector_name(connector),
- encoder->base.id, drm_get_encoder_name(encoder));
+ connector->base.id, connector->name,
+ encoder->base.id, encoder->name);
+
+ mutex_lock(&dev->mode_config.connection_mutex);
/*
* Algorithm gets a little messy:
@@ -8015,7 +8352,7 @@
}
/* Find an unused one (if possible) */
- list_for_each_entry(possible_crtc, &dev->mode_config.crtc_list, head) {
+ for_each_crtc(dev, possible_crtc) {
i++;
if (!(encoder->possible_crtcs & (1 << i)))
continue;
@@ -8030,7 +8367,7 @@
*/
if (!crtc) {
DRM_DEBUG_KMS("no pipe available for load-detect\n");
- return false;
+ goto fail_unlock_connector;
}
mutex_lock(&crtc->mutex);
@@ -8084,6 +8421,9 @@
else
intel_crtc->new_config = NULL;
mutex_unlock(&crtc->mutex);
+fail_unlock_connector:
+ mutex_unlock(&dev->mode_config.connection_mutex);
+
return false;
}
@@ -8097,8 +8437,8 @@
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
- connector->base.id, drm_get_connector_name(connector),
- encoder->base.id, drm_get_encoder_name(encoder));
+ connector->base.id, connector->name,
+ encoder->base.id, encoder->name);
if (old->load_detect_temp) {
to_intel_connector(connector)->new_encoder = NULL;
@@ -8113,6 +8453,7 @@
}
mutex_unlock(&crtc->mutex);
+ mutex_unlock(&connector->dev->mode_config.connection_mutex);
return;
}
@@ -8121,6 +8462,7 @@
connector->funcs->dpms(connector, old->dpms_mode);
mutex_unlock(&crtc->mutex);
+ mutex_unlock(&connector->dev->mode_config.connection_mutex);
}
static int i9xx_pll_refclk(struct drm_device *dev,
@@ -8406,7 +8748,7 @@
if (!i915.powersave)
goto out;
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ for_each_crtc(dev, crtc) {
if (!crtc->primary->fb)
continue;
@@ -8421,7 +8763,7 @@
}
void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *ring)
+ struct intel_engine_cs *ring)
{
struct drm_device *dev = obj->base.dev;
struct drm_crtc *crtc;
@@ -8429,7 +8771,7 @@
if (!i915.powersave)
return;
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ for_each_crtc(dev, crtc) {
if (!crtc->primary->fb)
continue;
@@ -8517,7 +8859,7 @@
if (work->event)
drm_send_vblank_event(dev, intel_crtc->pipe, work->event);
- drm_vblank_put(dev, intel_crtc->pipe);
+ drm_crtc_vblank_put(crtc);
spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -8544,6 +8886,48 @@
do_intel_finish_page_flip(dev, crtc);
}
+/* Is 'a' after or equal to 'b'? */
+static bool g4x_flip_count_after_eq(u32 a, u32 b)
+{
+ return !((a - b) & 0x80000000);
+}
+
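Aside (not part of the patch): g4x_flip_count_after_eq() is the usual
wrap-safe sequence comparison, the same trick time_after_eq() uses for
jiffies. For instance:

	/* Illustrative: survives the 32-bit counter wrapping around. */
	WARN_ON(!g4x_flip_count_after_eq(0x00000001, 0xffffffff)); /* 1 is after */
	WARN_ON(g4x_flip_count_after_eq(0xffffffff, 0x00000001));  /* ...not before */
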
+static bool page_flip_finished(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /*
+ * The relevant registers don't exist on pre-ctg.
+ * As the flip done interrupt doesn't trigger for mmio
+ * flips on gmch platforms, a flip count check isn't
+ * really needed there. But since ctg has the registers,
+ * include it in the check anyway.
+ */
+ if (INTEL_INFO(dev)->gen < 5 && !IS_G4X(dev))
+ return true;
+
+ /*
+ * A DSPSURFLIVE check isn't enough in case the mmio and CS flips
+ * used the same base address. In that case the mmio flip might
+ * have completed, but the CS hasn't even executed the flip yet.
+ *
+ * A flip count check isn't enough as the CS might have updated
+ * the base address just after start of vblank, but before we
+ * managed to process the interrupt. This means we'd complete the
+ * CS flip too soon.
+ *
+ * Combining both checks should get us a good enough result. It may
+ * still happen that the CS flip has been executed, but has not
+ * yet actually completed. But in case the base address is the same
+ * anyway, we don't really care.
+ */
+ return (I915_READ(DSPSURFLIVE(crtc->plane)) & ~0xfff) ==
+ crtc->unpin_work->gtt_offset &&
+ g4x_flip_count_after_eq(I915_READ(PIPE_FLIPCOUNT_GM45(crtc->pipe)),
+ crtc->unpin_work->flip_count);
+}
+
void intel_prepare_page_flip(struct drm_device *dev, int plane)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -8556,12 +8940,12 @@
* is also accompanied by a spurious intel_prepare_page_flip().
*/
spin_lock_irqsave(&dev->event_lock, flags);
- if (intel_crtc->unpin_work)
+ if (intel_crtc->unpin_work && page_flip_finished(intel_crtc))
atomic_inc_not_zero(&intel_crtc->unpin_work->pending);
spin_unlock_irqrestore(&dev->event_lock, flags);
}
-inline static void intel_mark_page_flip_active(struct intel_crtc *intel_crtc)
+static inline void intel_mark_page_flip_active(struct intel_crtc *intel_crtc)
{
/* Ensure that the work item is consistent when activating it ... */
smp_wmb();
@@ -8574,21 +8958,16 @@
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj,
+ struct intel_engine_cs *ring,
uint32_t flags)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
u32 flip_mask;
- struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
int ret;
- ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
- if (ret)
- goto err;
-
ret = intel_ring_begin(ring, 6);
if (ret)
- goto err_unpin;
+ return ret;
/* Can't queue multiple flips, so wait for the previous
* one to finish before executing the next.
@@ -8602,38 +8981,28 @@
intel_ring_emit(ring, MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0]);
- intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
+ intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
intel_ring_emit(ring, 0); /* aux display base address, unused */
intel_mark_page_flip_active(intel_crtc);
__intel_ring_advance(ring);
return 0;
-
-err_unpin:
- intel_unpin_fb_obj(obj);
-err:
- return ret;
}
static int intel_gen3_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj,
+ struct intel_engine_cs *ring,
uint32_t flags)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
u32 flip_mask;
- struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
int ret;
- ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
- if (ret)
- goto err;
-
ret = intel_ring_begin(ring, 6);
if (ret)
- goto err_unpin;
+ return ret;
if (intel_crtc->plane)
flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
@@ -8644,38 +9013,29 @@
intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0]);
- intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
+ intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
intel_ring_emit(ring, MI_NOOP);
intel_mark_page_flip_active(intel_crtc);
__intel_ring_advance(ring);
return 0;
-
-err_unpin:
- intel_unpin_fb_obj(obj);
-err:
- return ret;
}
static int intel_gen4_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj,
+ struct intel_engine_cs *ring,
uint32_t flags)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t pf, pipesrc;
- struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
int ret;
- ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
- if (ret)
- goto err;
-
ret = intel_ring_begin(ring, 4);
if (ret)
- goto err_unpin;
+ return ret;
/* i965+ uses the linear or tiled offsets from the
* Display Registers (which do not change across a page-flip)
@@ -8684,8 +9044,7 @@
intel_ring_emit(ring, MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0]);
- intel_ring_emit(ring,
- (i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset) |
+ intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset |
obj->tiling_mode);
/* XXX Enabling the panel-fitter across page-flip is so far
@@ -8699,37 +9058,28 @@
intel_mark_page_flip_active(intel_crtc);
__intel_ring_advance(ring);
return 0;
-
-err_unpin:
- intel_unpin_fb_obj(obj);
-err:
- return ret;
}
static int intel_gen6_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj,
+ struct intel_engine_cs *ring,
uint32_t flags)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
uint32_t pf, pipesrc;
int ret;
- ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
- if (ret)
- goto err;
-
ret = intel_ring_begin(ring, 4);
if (ret)
- goto err_unpin;
+ return ret;
intel_ring_emit(ring, MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
- intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
+ intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
/* Contrary to the suggestions in the documentation,
* "Enable Panel Fitter" does not seem to be required when page
@@ -8744,34 +9094,20 @@
intel_mark_page_flip_active(intel_crtc);
__intel_ring_advance(ring);
return 0;
-
-err_unpin:
- intel_unpin_fb_obj(obj);
-err:
- return ret;
}
static int intel_gen7_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj,
+ struct intel_engine_cs *ring,
uint32_t flags)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_ring_buffer *ring;
uint32_t plane_bit = 0;
int len, ret;
- ring = obj->ring;
- if (IS_VALLEYVIEW(dev) || ring == NULL || ring->id != RCS)
- ring = &dev_priv->ring[BCS];
-
- ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
- if (ret)
- goto err;
-
- switch(intel_crtc->plane) {
+ switch (intel_crtc->plane) {
case PLANE_A:
plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A;
break;
@@ -8783,8 +9119,7 @@
break;
default:
WARN_ONCE(1, "unknown plane in flip command\n");
- ret = -ENODEV;
- goto err_unpin;
+ return -ENODEV;
}
len = 4;
@@ -8811,11 +9146,11 @@
*/
ret = intel_ring_cacheline_align(ring);
if (ret)
- goto err_unpin;
+ return ret;
ret = intel_ring_begin(ring, len);
if (ret)
- goto err_unpin;
+ return ret;
/* Unmask the flip-done completion message. Note that the bspec says that
* we should do this for both the BCS and RCS, and that we must not unmask
@@ -8848,23 +9183,19 @@
intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
- intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
+ intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
intel_ring_emit(ring, (MI_NOOP));
intel_mark_page_flip_active(intel_crtc);
__intel_ring_advance(ring);
return 0;
-
-err_unpin:
- intel_unpin_fb_obj(obj);
-err:
- return ret;
}
static int intel_default_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj,
+ struct intel_engine_cs *ring,
uint32_t flags)
{
return -ENODEV;
@@ -8881,6 +9212,7 @@
struct drm_i915_gem_object *obj = to_intel_framebuffer(fb)->obj;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_unpin_work *work;
+ struct intel_engine_cs *ring;
unsigned long flags;
int ret;
@@ -8909,7 +9241,7 @@
work->old_fb_obj = to_intel_framebuffer(old_fb)->obj;
INIT_WORK(&work->work, intel_unpin_work_fn);
- ret = drm_vblank_get(dev, intel_crtc->pipe);
+ ret = drm_crtc_vblank_get(crtc);
if (ret)
goto free_work;
@@ -8918,7 +9250,7 @@
if (intel_crtc->unpin_work) {
spin_unlock_irqrestore(&dev->event_lock, flags);
kfree(work);
- drm_vblank_put(dev, intel_crtc->pipe);
+ drm_crtc_vblank_put(crtc);
DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
return -EBUSY;
@@ -8946,10 +9278,30 @@
atomic_inc(&intel_crtc->unpin_work_count);
intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
- ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, page_flip_flags);
+ if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
+ work->flip_count = I915_READ(PIPE_FLIPCOUNT_GM45(intel_crtc->pipe)) + 1;
+
+ if (IS_VALLEYVIEW(dev)) {
+ ring = &dev_priv->ring[BCS];
+ } else if (INTEL_INFO(dev)->gen >= 7) {
+ ring = obj->ring;
+ if (ring == NULL || ring->id != RCS)
+ ring = &dev_priv->ring[BCS];
+ } else {
+ ring = &dev_priv->ring[RCS];
+ }
+
+ ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
if (ret)
goto cleanup_pending;
+ work->gtt_offset =
+ i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset;
+
+ ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, ring, page_flip_flags);
+ if (ret)
+ goto cleanup_unpin;
+
intel_disable_fbc(dev);
intel_mark_fb_busy(obj, NULL);
mutex_unlock(&dev->struct_mutex);
@@ -8958,6 +9310,8 @@
return 0;
+cleanup_unpin:
+ intel_unpin_fb_obj(obj);
cleanup_pending:
atomic_dec(&intel_crtc->unpin_work_count);
crtc->primary->fb = old_fb;
@@ -8970,7 +9324,7 @@
intel_crtc->unpin_work = NULL;
spin_unlock_irqrestore(&dev->event_lock, flags);
- drm_vblank_put(dev, intel_crtc->pipe);
+ drm_crtc_vblank_put(crtc);
free_work:
kfree(work);
@@ -9013,8 +9367,7 @@
to_intel_crtc(encoder->base.crtc);
}
- list_for_each_entry(crtc, &dev->mode_config.crtc_list,
- base.head) {
+ for_each_intel_crtc(dev, crtc) {
crtc->new_enabled = crtc->base.enabled;
if (crtc->new_enabled)
@@ -9045,21 +9398,20 @@
encoder->base.crtc = &encoder->new_crtc->base;
}
- list_for_each_entry(crtc, &dev->mode_config.crtc_list,
- base.head) {
+ for_each_intel_crtc(dev, crtc) {
crtc->base.enabled = crtc->new_enabled;
}
}
static void
-connected_sink_compute_bpp(struct intel_connector * connector,
+connected_sink_compute_bpp(struct intel_connector *connector,
struct intel_crtc_config *pipe_config)
{
int bpp = pipe_config->pipe_bpp;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constrains\n",
connector->base.base.id,
- drm_get_connector_name(&connector->base));
+ connector->base.name);
/* Don't use an invalid EDID bpc value */
if (connector->base.display_info.bpc &&
@@ -9400,8 +9752,7 @@
}
/* Check for pipes that will be enabled/disabled ... */
- list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
- base.head) {
+ for_each_intel_crtc(dev, intel_crtc) {
if (intel_crtc->base.enabled == intel_crtc->new_enabled)
continue;
@@ -9474,8 +9825,7 @@
intel_modeset_commit_output_state(dev);
/* Double check state. */
- list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
- base.head) {
+ for_each_intel_crtc(dev, intel_crtc) {
WARN_ON(intel_crtc->base.enabled != intel_crtc_in_use(&intel_crtc->base));
WARN_ON(intel_crtc->new_config &&
intel_crtc->new_config != &intel_crtc->config);
@@ -9604,6 +9954,12 @@
PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_end);
PIPE_CONF_CHECK_I(pixel_multiplier);
+ PIPE_CONF_CHECK_I(has_hdmi_sink);
+ if ((INTEL_INFO(dev)->gen < 8 && !IS_HASWELL(dev)) ||
+ IS_VALLEYVIEW(dev))
+ PIPE_CONF_CHECK_I(limited_color_range);
+
+ PIPE_CONF_CHECK_I(has_audio);
PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
DRM_MODE_FLAG_INTERLACE);
@@ -9701,7 +10057,7 @@
DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
encoder->base.base.id,
- drm_get_encoder_name(&encoder->base));
+ encoder->base.name);
WARN(&encoder->new_crtc->base != encoder->base.crtc,
"encoder's stage crtc doesn't match current crtc\n");
@@ -9753,8 +10109,7 @@
struct intel_encoder *encoder;
struct intel_crtc_config pipe_config;
- list_for_each_entry(crtc, &dev->mode_config.crtc_list,
- base.head) {
+ for_each_intel_crtc(dev, crtc) {
bool enabled = false;
bool active = false;
@@ -9843,8 +10198,7 @@
"pll on state mismatch (expected %i, found %i)\n",
pll->on, active);
- list_for_each_entry(crtc, &dev->mode_config.crtc_list,
- base.head) {
+ for_each_intel_crtc(dev, crtc) {
if (crtc->base.enabled && intel_crtc_to_shared_dpll(crtc) == pll)
enabled_crtcs++;
if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll)
@@ -9884,6 +10238,44 @@
pipe_config->adjusted_mode.crtc_clock, dotclock);
}
+static void update_scanline_offset(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+
+ /*
+ * The scanline counter increments at the leading edge of hsync.
+ *
+ * On most platforms it starts counting from vtotal-1 on the
+ * first active line. That means the scanline counter value is
+ * always one less than what we would expect. I.e. just after
+ * start of vblank, which also occurs at start of hsync (on the
+ * last active line), the scanline counter will read vblank_start-1.
+ *
+ * On gen2 the scanline counter starts counting from 1 instead
+ * of vtotal-1, so we have to subtract one (or rather add vtotal-1
+ * to keep the value positive), instead of adding one.
+ *
+ * On HSW+ the behaviour of the scanline counter depends on the output
+ * type. For DP ports it behaves like most other platforms, but on HDMI
+ * there's an extra 1 line difference. So we need to add two instead of
+ * one to the value.
+ */
+ if (IS_GEN2(dev)) {
+ const struct drm_display_mode *mode = &crtc->config.adjusted_mode;
+ int vtotal;
+
+ vtotal = mode->crtc_vtotal;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ vtotal /= 2;
+
+ crtc->scanline_offset = vtotal - 1;
+ } else if (HAS_DDI(dev) &&
+ intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI)) {
+ crtc->scanline_offset = 2;
+ } else
+ crtc->scanline_offset = 1;
+}
+
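For reference, a minimal sketch of how scanline_offset is meant to be consumed
when converting a raw hardware scanline counter value into a logical scanline
(the helper name is hypothetical, not part of the patch):

static int logical_scanline(int hw_scanline, int vtotal, int scanline_offset)
{
	/* Adding the per-platform offset and wrapping modulo vtotal maps the
	 * hardware's off-by-one (or off-by-two) reading back into the
	 * 0..vtotal-1 range described in the comment above.
	 */
	return (hw_scanline + scanline_offset) % vtotal;
}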
static int __intel_set_mode(struct drm_crtc *crtc,
struct drm_display_mode *mode,
int x, int y, struct drm_framebuffer *fb)
@@ -9975,15 +10367,38 @@
* on the DPLL.
*/
for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
- ret = intel_crtc_mode_set(&intel_crtc->base,
- x, y, fb);
+ struct drm_framebuffer *old_fb;
+
+ mutex_lock(&dev->struct_mutex);
+ ret = intel_pin_and_fence_fb_obj(dev,
+ to_intel_framebuffer(fb)->obj,
+ NULL);
+ if (ret != 0) {
+ DRM_ERROR("pin & fence failed\n");
+ mutex_unlock(&dev->struct_mutex);
+ goto done;
+ }
+ old_fb = crtc->primary->fb;
+ if (old_fb)
+ intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
+ mutex_unlock(&dev->struct_mutex);
+
+ crtc->primary->fb = fb;
+ crtc->x = x;
+ crtc->y = y;
+
+ ret = dev_priv->display.crtc_mode_set(&intel_crtc->base,
+ x, y, fb);
if (ret)
goto done;
}
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
- for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc)
+ for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) {
+ update_scanline_offset(intel_crtc);
+
dev_priv->display.crtc_enable(&intel_crtc->base);
+ }
/* FIXME: add subpixel order */
done:
@@ -10059,7 +10474,7 @@
* restored, not the drivers personal bookkeeping.
*/
count = 0;
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ for_each_crtc(dev, crtc) {
config->save_crtc_enabled[count++] = crtc->enabled;
}
@@ -10085,7 +10500,7 @@
int count;
count = 0;
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
+ for_each_intel_crtc(dev, crtc) {
crtc->new_enabled = config->save_crtc_enabled[count++];
if (crtc->new_enabled)
@@ -10209,7 +10624,7 @@
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n",
connector->base.base.id,
- drm_get_connector_name(&connector->base));
+ connector->base.name);
}
@@ -10244,7 +10659,7 @@
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
connector->base.base.id,
- drm_get_connector_name(&connector->base),
+ connector->base.name,
new_crtc->base.id);
}
@@ -10275,8 +10690,7 @@
}
/* Now we've also updated encoder->new_crtc for all encoders. */
- list_for_each_entry(crtc, &dev->mode_config.crtc_list,
- base.head) {
+ for_each_intel_crtc(dev, crtc) {
crtc->new_enabled = false;
list_for_each_entry(encoder,
@@ -10489,7 +10903,7 @@
struct intel_crtc *crtc;
/* Make sure no transcoder is still depending on us. */
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
+ for_each_intel_crtc(dev, crtc) {
if (intel_crtc_to_shared_dpll(crtc) == pll)
assert_pch_transcoder_disabled(dev_priv, crtc->pipe);
}
@@ -10572,13 +10986,16 @@
dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
+
+ WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
}
enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
{
struct drm_encoder *encoder = connector->base.encoder;
+ struct drm_device *dev = connector->base.dev;
- WARN_ON(!mutex_is_locked(&connector->base.dev->mode_config.mutex));
+ WARN_ON(!mutex_is_locked(&dev->mode_config.connection_mutex));
if (!encoder)
return INVALID_PIPE;
@@ -10674,7 +11091,7 @@
intel_lvds_init(dev);
- if (!IS_ULT(dev))
+ if (!IS_ULT(dev) && !IS_CHERRYVIEW(dev))
intel_crt_init(dev);
if (HAS_DDI(dev)) {
@@ -10738,6 +11155,15 @@
intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C);
}
+ if (IS_CHERRYVIEW(dev)) {
+ if (I915_READ(VLV_DISPLAY_BASE + CHV_HDMID) & SDVO_DETECTED) {
+ intel_hdmi_init(dev, VLV_DISPLAY_BASE + CHV_HDMID,
+ PORT_D);
+ if (I915_READ(VLV_DISPLAY_BASE + DP_D) & DP_DETECTED)
+ intel_dp_init(dev, VLV_DISPLAY_BASE + DP_D, PORT_D);
+ }
+ }
+
intel_dsi_init(dev);
} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
bool found = false;
@@ -10967,6 +11393,8 @@
if (HAS_PCH_SPLIT(dev) || IS_G4X(dev))
dev_priv->display.find_dpll = g4x_find_best_dpll;
+ else if (IS_CHERRYVIEW(dev))
+ dev_priv->display.find_dpll = chv_find_best_dpll;
else if (IS_VALLEYVIEW(dev))
dev_priv->display.find_dpll = vlv_find_best_dpll;
else if (IS_PINEVIEW(dev))
@@ -11336,12 +11764,11 @@
/* Just in case the BIOS is doing something questionable. */
intel_disable_fbc(dev);
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
intel_modeset_setup_hw_state(dev, false);
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
- list_for_each_entry(crtc, &dev->mode_config.crtc_list,
- base.head) {
+ for_each_intel_crtc(dev, crtc) {
if (!crtc->active)
continue;
@@ -11430,6 +11857,12 @@
reg = PIPECONF(crtc->config.cpu_transcoder);
I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
+ /* restore vblank interrupts to correct state */
+ if (crtc->active)
+ drm_vblank_on(dev, crtc->pipe);
+ else
+ drm_vblank_off(dev, crtc->pipe);
+
/* We need to sanitize the plane -> pipe mapping first because this will
* disable the crtc (and hence change the state) if it is wrong. Note
* that gen4+ has a fixed plane -> pipe mapping. */
@@ -11499,16 +11932,25 @@
encoder->base.crtc = NULL;
}
}
- if (crtc->active) {
+
+ if (crtc->active || IS_VALLEYVIEW(dev) || INTEL_INFO(dev)->gen < 5) {
/*
* We start out with underrun reporting disabled to avoid races.
* For correct bookkeeping mark this on active crtcs.
*
+ * Also on gmch platforms we don't have any hardware bits to
+ * disable the underrun reporting. Which means we need to start
+ * out with underrun reporting disabled also on inactive pipes,
+ * since otherwise we'll complain about the garbage we read when
+ * e.g. coming up after runtime pm.
+ *
* No protection against concurrent access is required - at
* worst a fifo underrun happens which also sets this to false.
*/
crtc->cpu_fifo_underrun_disabled = true;
crtc->pch_fifo_underrun_disabled = true;
+
+ update_scanline_offset(crtc);
}
}
@@ -11526,7 +11968,7 @@
if (encoder->connectors_active && !has_active_crtc) {
DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
encoder->base.base.id,
- drm_get_encoder_name(&encoder->base));
+ encoder->base.name);
/* Connector is active, but has no active pipe. This is
* fallout from our resume register restoring. Disable
@@ -11534,7 +11976,7 @@
if (encoder->base.crtc) {
DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
encoder->base.base.id,
- drm_get_encoder_name(&encoder->base));
+ encoder->base.name);
encoder->disable(encoder);
}
@@ -11602,8 +12044,7 @@
struct intel_connector *connector;
int i;
- list_for_each_entry(crtc, &dev->mode_config.crtc_list,
- base.head) {
+ for_each_intel_crtc(dev, crtc) {
memset(&crtc->config, 0, sizeof(crtc->config));
crtc->config.quirks |= PIPE_CONFIG_QUIRK_INHERITED_MODE;
@@ -11628,8 +12069,7 @@
pll->on = pll->get_hw_state(dev_priv, pll, &pll->hw_state);
pll->active = 0;
- list_for_each_entry(crtc, &dev->mode_config.crtc_list,
- base.head) {
+ for_each_intel_crtc(dev, crtc) {
if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll)
pll->active++;
}
@@ -11654,7 +12094,7 @@
encoder->connectors_active = false;
DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
encoder->base.base.id,
- drm_get_encoder_name(&encoder->base),
+ encoder->base.name,
encoder->base.crtc ? "enabled" : "disabled",
pipe_name(pipe));
}
@@ -11671,7 +12111,7 @@
}
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
connector->base.base.id,
- drm_get_connector_name(&connector->base),
+ connector->base.name,
connector->base.encoder ? "enabled" : "disabled");
}
}
@@ -11694,8 +12134,7 @@
* Note that this could go away if we move to using crtc_config
* checking everywhere.
*/
- list_for_each_entry(crtc, &dev->mode_config.crtc_list,
- base.head) {
+ for_each_intel_crtc(dev, crtc) {
if (crtc->active && i915.fastboot) {
intel_mode_from_pipe_config(&crtc->base.mode, &crtc->config);
DRM_DEBUG_KMS("[CRTC:%d] found active mode: ",
@@ -11771,7 +12210,7 @@
* for this.
*/
mutex_lock(&dev->struct_mutex);
- list_for_each_entry(c, &dev->mode_config.crtc_list, head) {
+ for_each_crtc(dev, c) {
if (!c->primary->fb)
continue;
@@ -11817,7 +12256,7 @@
intel_unregister_dsm_handler();
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ for_each_crtc(dev, crtc) {
/* Skip inactive CRTCs */
if (!crtc->primary->fb)
continue;
@@ -11973,15 +12412,9 @@
if (!error->pipe[i].power_domain_on)
continue;
- if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev)) {
- error->cursor[i].control = I915_READ(CURCNTR(i));
- error->cursor[i].position = I915_READ(CURPOS(i));
- error->cursor[i].base = I915_READ(CURBASE(i));
- } else {
- error->cursor[i].control = I915_READ(CURCNTR_IVB(i));
- error->cursor[i].position = I915_READ(CURPOS_IVB(i));
- error->cursor[i].base = I915_READ(CURBASE_IVB(i));
- }
+ error->cursor[i].control = I915_READ(CURCNTR(i));
+ error->cursor[i].position = I915_READ(CURPOS(i));
+ error->cursor[i].base = I915_READ(CURBASE(i));
error->plane[i].control = I915_READ(DSPCNTR(i));
error->plane[i].stride = I915_READ(DSPSTRIDE(i));
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 34ed143..c4d8839 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -64,6 +64,24 @@
{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};
+/*
+ * CHV supports eDP 1.4, which adds more link rates.
+ * Only the fixed link rates are listed below; the variable rates are excluded.
+ */
+static const struct dp_link_dpll chv_dpll[] = {
+ /*
+ * CHV requires programming a fractional divider for m2.
+ * m2 is stored in fixed-point format using the formula below:
+ * (m2_int << 22) | m2_fraction
+ */
+ { DP_LINK_BW_1_62, /* m2_int = 32, m2_fraction = 1677722 */
+ { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
+ { DP_LINK_BW_2_7, /* m2_int = 27, m2_fraction = 0 */
+ { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
+ { DP_LINK_BW_5_4, /* m2_int = 27, m2_fraction = 0 */
+ { .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
+};
+
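As a worked check of that fixed-point encoding: for the DP_LINK_BW_1_62 entry,
m2_int = 32 and m2_fraction = 1677722 = 0x19999a, so
(32 << 22) | 1677722 = 0x8000000 | 0x19999a = 0x819999a, matching the table
above. A packing helper (illustrative only, not part of the patch) would be:

static inline u32 chv_pack_m2(u32 m2_int, u32 m2_fraction)
{
	/* e.g. (32 << 22) | 1677722 == 0x819999a */
	return (m2_int << 22) | m2_fraction;
}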
/**
* is_edp - is the given port attached to an eDP panel (either CPU or PCH)
* @intel_dp: DP struct
@@ -726,6 +744,9 @@
} else if (HAS_PCH_SPLIT(dev)) {
divisor = pch_dpll;
count = ARRAY_SIZE(pch_dpll);
+ } else if (IS_CHERRYVIEW(dev)) {
+ divisor = chv_dpll;
+ count = ARRAY_SIZE(chv_dpll);
} else if (IS_VALLEYVIEW(dev)) {
divisor = vlv_dpll;
count = ARRAY_SIZE(vlv_dpll);
@@ -779,6 +800,7 @@
pipe_config->has_pch_encoder = true;
pipe_config->has_dp_encoder = true;
+ pipe_config->has_audio = intel_dp->has_audio;
if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
@@ -903,7 +925,7 @@
udelay(500);
}
-static void intel_dp_mode_set(struct intel_encoder *encoder)
+static void intel_dp_prepare(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -938,7 +960,7 @@
intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);
- if (intel_dp->has_audio) {
+ if (crtc->config.has_audio) {
DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
pipe_name(crtc->pipe));
intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
@@ -971,14 +993,15 @@
if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
intel_dp->DP |= DP_ENHANCED_FRAMING;
- if (crtc->pipe == 1)
- intel_dp->DP |= DP_PIPEB_SELECT;
+ if (!IS_CHERRYVIEW(dev)) {
+ if (crtc->pipe == 1)
+ intel_dp->DP |= DP_PIPEB_SELECT;
+ } else {
+ intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
+ }
} else {
intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
}
-
- if (port == PORT_A && !IS_VALLEYVIEW(dev))
- ironlake_set_pll_cpu_edp(intel_dp);
}
#define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
@@ -1131,7 +1154,7 @@
u32 pp;
u32 pp_stat_reg, pp_ctrl_reg;
- WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
+ WARN_ON(!mutex_is_locked(&dev->mode_config.connection_mutex));
if (!intel_dp->want_panel_vdd && edp_have_panel_vdd(intel_dp)) {
struct intel_digital_port *intel_dig_port =
@@ -1168,9 +1191,9 @@
struct intel_dp, panel_vdd_work);
struct drm_device *dev = intel_dp_to_dev(intel_dp);
- mutex_lock(&dev->mode_config.mutex);
+ mutex_lock(&dev->mode_config.connection_mutex);
edp_panel_vdd_off_sync(intel_dp);
- mutex_unlock(&dev->mode_config.mutex);
+ mutex_unlock(&dev->mode_config.connection_mutex);
}
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
@@ -1434,6 +1457,8 @@
if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
*pipe = PORT_TO_PIPE_CPT(tmp);
+ } else if (IS_CHERRYVIEW(dev)) {
+ *pipe = DP_PORT_TO_PIPE_CHV(tmp);
} else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
*pipe = PORT_TO_PIPE(tmp);
} else {
@@ -1481,8 +1506,11 @@
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
int dotclock;
+ tmp = I915_READ(intel_dp->output_reg);
+ if (tmp & DP_AUDIO_OUTPUT_ENABLE)
+ pipe_config->has_audio = true;
+
if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
- tmp = I915_READ(intel_dp->output_reg);
if (tmp & DP_SYNC_HS_HIGH)
flags |= DRM_MODE_FLAG_PHSYNC;
else
@@ -1837,6 +1865,42 @@
intel_dp_link_down(intel_dp);
}
+static void chv_post_disable_dp(struct intel_encoder *encoder)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(encoder->base.crtc);
+ enum dpio_channel ch = vlv_dport_to_channel(dport);
+ enum pipe pipe = intel_crtc->pipe;
+ u32 val;
+
+ intel_dp_link_down(intel_dp);
+
+ mutex_lock(&dev_priv->dpio_lock);
+
+ /* Propagate soft reset to data lane reset */
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
+ val |= CHV_PCS_REQ_SOFTRESET_EN;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
+ val |= CHV_PCS_REQ_SOFTRESET_EN;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
+ val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
+ val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
+
+ mutex_unlock(&dev_priv->dpio_lock);
+}
+
static void intel_enable_dp(struct intel_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
@@ -1876,8 +1940,13 @@
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
- if (dport->port == PORT_A)
+ intel_dp_prepare(encoder);
+
+ /* Only ilk+ has port A */
+ if (dport->port == PORT_A) {
+ ironlake_set_pll_cpu_edp(intel_dp);
ironlake_edp_pll_on(intel_dp);
+ }
}
static void vlv_pre_enable_dp(struct intel_encoder *encoder)
@@ -1929,6 +1998,8 @@
enum dpio_channel port = vlv_dport_to_channel(dport);
int pipe = intel_crtc->pipe;
+ intel_dp_prepare(encoder);
+
/* Program Tx lane resets to default */
mutex_lock(&dev_priv->dpio_lock);
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
@@ -1947,6 +2018,69 @@
mutex_unlock(&dev_priv->dpio_lock);
}
+static void chv_pre_enable_dp(struct intel_encoder *encoder)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct edp_power_seq power_seq;
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(encoder->base.crtc);
+ enum dpio_channel ch = vlv_dport_to_channel(dport);
+ int pipe = intel_crtc->pipe;
+ int data, i;
+ u32 val;
+
+ mutex_lock(&dev_priv->dpio_lock);
+
+ /* Deassert soft data lane reset */
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
+ val |= CHV_PCS_REQ_SOFTRESET_EN;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
+ val |= CHV_PCS_REQ_SOFTRESET_EN;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
+ val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
+ val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
+
+ /* Program Tx lane latency optimal setting */
+ for (i = 0; i < 4; i++) {
+ /* Set the latency optimal bit */
+ data = (i == 1) ? 0x0 : 0x6;
+ vlv_dpio_write(dev_priv, pipe, CHV_TX_DW11(ch, i),
+ data << DPIO_FRC_LATENCY_SHFIT);
+
+ /* Set the upar bit */
+ data = (i == 1) ? 0x0 : 0x1;
+ vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
+ data << DPIO_UPAR_SHIFT);
+ }
+
+ /* Data lane stagger programming */
+ /* FIXME: Fix up value only after power analysis */
+
+ mutex_unlock(&dev_priv->dpio_lock);
+
+ if (is_edp(intel_dp)) {
+ /* init power sequencer on this pipe and port */
+ intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
+ intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
+ &power_seq);
+ }
+
+ intel_enable_dp(encoder);
+
+ vlv_wait_port_ready(dev_priv, dport);
+}
+
/*
* Native read with retry for link status and receiver capability reads for
* cases where the sink may still be asleep.
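The retry scheme that comment describes boils down to roughly the following
(an illustrative sketch; the helper name and retry count are assumptions,
drm_dp_dpcd_read() is the generic DP AUX helper):

static ssize_t dpcd_read_retry(struct drm_dp_aux *aux, unsigned int offset,
			       void *buffer, size_t size)
{
	ssize_t ret;
	int i;

	/* The sink may still be waking up; retry a few times before giving up. */
	for (i = 0; i < 3; i++) {
		ret = drm_dp_dpcd_read(aux, offset, buffer, size);
		if (ret == size)
			return ret;
		msleep(1);
	}

	return ret;
}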
@@ -2171,6 +2305,166 @@
return 0;
}
+static uint32_t intel_chv_signal_levels(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
+ struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
+ u32 deemph_reg_value, margin_reg_value, val;
+ uint8_t train_set = intel_dp->train_set[0];
+ enum dpio_channel ch = vlv_dport_to_channel(dport);
+ enum pipe pipe = intel_crtc->pipe;
+ int i;
+
+ switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
+ case DP_TRAIN_PRE_EMPHASIS_0:
+ switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_400:
+ deemph_reg_value = 128;
+ margin_reg_value = 52;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_600:
+ deemph_reg_value = 128;
+ margin_reg_value = 77;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_800:
+ deemph_reg_value = 128;
+ margin_reg_value = 102;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_1200:
+ deemph_reg_value = 128;
+ margin_reg_value = 154;
+ /* FIXME: extra programming needed for the 1200 mV case */
+ break;
+ default:
+ return 0;
+ }
+ break;
+ case DP_TRAIN_PRE_EMPHASIS_3_5:
+ switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_400:
+ deemph_reg_value = 85;
+ margin_reg_value = 78;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_600:
+ deemph_reg_value = 85;
+ margin_reg_value = 116;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_800:
+ deemph_reg_value = 85;
+ margin_reg_value = 154;
+ break;
+ default:
+ return 0;
+ }
+ break;
+ case DP_TRAIN_PRE_EMPHASIS_6:
+ switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_400:
+ deemph_reg_value = 64;
+ margin_reg_value = 104;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_600:
+ deemph_reg_value = 64;
+ margin_reg_value = 154;
+ break;
+ default:
+ return 0;
+ }
+ break;
+ case DP_TRAIN_PRE_EMPHASIS_9_5:
+ switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_400:
+ deemph_reg_value = 43;
+ margin_reg_value = 154;
+ break;
+ default:
+ return 0;
+ }
+ break;
+ default:
+ return 0;
+ }
+
+ mutex_lock(&dev_priv->dpio_lock);
+
+ /* Clear calc init */
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
+ val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
+ val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
+
+ /* Program swing deemph */
+ for (i = 0; i < 4; i++) {
+ val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
+ val &= ~DPIO_SWING_DEEMPH9P5_MASK;
+ val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
+ vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
+ }
+
+ /* Program swing margin */
+ for (i = 0; i < 4; i++) {
+ val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
+ val &= ~DPIO_SWING_MARGIN_MASK;
+ val |= margin_reg_value << DPIO_SWING_MARGIN_SHIFT;
+ vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
+ }
+
+ /* Disable unique transition scale */
+ for (i = 0; i < 4; i++) {
+ val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
+ val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
+ vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
+ }
+
+ if (((train_set & DP_TRAIN_PRE_EMPHASIS_MASK)
+ == DP_TRAIN_PRE_EMPHASIS_0) &&
+ ((train_set & DP_TRAIN_VOLTAGE_SWING_MASK)
+ == DP_TRAIN_VOLTAGE_SWING_1200)) {
+
+ /*
+ * The documentation says bit 27 needs to be set for ch0 and bit 26
+ * for ch1, which might be a typo in the doc.
+ * For now, set bit 27 for both ch0 and ch1 for this unique
+ * transition scale selection.
+ */
+ for (i = 0; i < 4; i++) {
+ val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
+ val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
+ vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
+ }
+
+ for (i = 0; i < 4; i++) {
+ val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
+ val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
+ val |= (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT);
+ vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
+ }
+ }
+
+ /* Start swing calculation */
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
+ val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
+ val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
+
+ /* LRC Bypass */
+ val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
+ val |= DPIO_LRC_BYPASS;
+ vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);
+
+ mutex_unlock(&dev_priv->dpio_lock);
+
+ return 0;
+}
+
static void
intel_get_adjust_train(struct intel_dp *intel_dp,
const uint8_t link_status[DP_LINK_STATUS_SIZE])
@@ -2385,6 +2679,9 @@
} else if (IS_HASWELL(dev)) {
signal_levels = intel_hsw_signal_levels(train_set);
mask = DDI_BUF_EMP_MASK;
+ } else if (IS_CHERRYVIEW(dev)) {
+ signal_levels = intel_chv_signal_levels(intel_dp);
+ mask = 0;
} else if (IS_VALLEYVIEW(dev)) {
signal_levels = intel_vlv_signal_levels(intel_dp);
mask = 0;
@@ -2751,22 +3048,7 @@
to_intel_crtc(intel_dig_port->base.base.crtc);
uint32_t DP = intel_dp->DP;
- /*
- * DDI code has a strict mode set sequence and we should try to respect
- * it, otherwise we might hang the machine in many different ways. So we
- * really should be disabling the port only on a complete crtc_disable
- * sequence. This function is just called under two conditions on DDI
- * code:
- * - Link train failed while doing crtc_enable, and on this case we
- * really should respect the mode set sequence and wait for a
- * crtc_disable.
- * - Someone turned the monitor off and intel_dp_check_link_status
- * called us. We don't need to disable the whole port on this case, so
- * when someone turns the monitor on again,
- * intel_ddi_prepare_link_retrain will take care of redoing the link
- * train.
- */
- if (HAS_DDI(dev))
+ if (WARN_ON(HAS_DDI(dev)))
return;
if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
@@ -2953,6 +3235,7 @@
u8 sink_irq_vector;
u8 link_status[DP_LINK_STATUS_SIZE];
+ /* FIXME: This access isn't protected by any locks. */
if (!intel_encoder->connectors_active)
return;
@@ -2985,7 +3268,7 @@
if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
- drm_get_encoder_name(&intel_encoder->base));
+ intel_encoder->base.name);
intel_dp_start_link_train(intel_dp);
intel_dp_complete_link_train(intel_dp);
intel_dp_stop_link_train(intel_dp);
@@ -3171,7 +3454,7 @@
intel_display_power_get(dev_priv, power_domain);
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
- connector->base.id, drm_get_connector_name(connector));
+ connector->base.id, connector->name);
intel_dp->has_audio = false;
@@ -3383,9 +3666,9 @@
drm_encoder_cleanup(encoder);
if (is_edp(intel_dp)) {
cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
- mutex_lock(&dev->mode_config.mutex);
+ mutex_lock(&dev->mode_config.connection_mutex);
edp_panel_vdd_off_sync(intel_dp);
- mutex_unlock(&dev->mode_config.mutex);
+ mutex_unlock(&dev->mode_config.connection_mutex);
}
kfree(intel_dig_port);
}
@@ -3964,9 +4247,9 @@
drm_dp_aux_unregister_i2c_bus(&intel_dp->aux);
if (is_edp(intel_dp)) {
cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
- mutex_lock(&dev->mode_config.mutex);
+ mutex_lock(&dev->mode_config.connection_mutex);
edp_panel_vdd_off_sync(intel_dp);
- mutex_unlock(&dev->mode_config.mutex);
+ mutex_unlock(&dev->mode_config.connection_mutex);
}
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
@@ -4012,11 +4295,14 @@
DRM_MODE_ENCODER_TMDS);
intel_encoder->compute_config = intel_dp_compute_config;
- intel_encoder->mode_set = intel_dp_mode_set;
intel_encoder->disable = intel_disable_dp;
intel_encoder->get_hw_state = intel_dp_get_hw_state;
intel_encoder->get_config = intel_dp_get_config;
- if (IS_VALLEYVIEW(dev)) {
+ if (IS_CHERRYVIEW(dev)) {
+ intel_encoder->pre_enable = chv_pre_enable_dp;
+ intel_encoder->enable = vlv_enable_dp;
+ intel_encoder->post_disable = chv_post_disable_dp;
+ } else if (IS_VALLEYVIEW(dev)) {
intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
intel_encoder->pre_enable = vlv_pre_enable_dp;
intel_encoder->enable = vlv_enable_dp;
@@ -4031,7 +4317,14 @@
intel_dig_port->dp.output_reg = output_reg;
intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
- intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+ if (IS_CHERRYVIEW(dev)) {
+ if (port == PORT_D)
+ intel_encoder->crtc_mask = 1 << 2;
+ else
+ intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
+ } else {
+ intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+ }
intel_encoder->cloneable = 0;
intel_encoder->hot_plug = intel_dp_hot_plug;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index d8b540b..f1d5897 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -106,8 +106,8 @@
#define INTEL_DVO_CHIP_TMDS 2
#define INTEL_DVO_CHIP_TVOUT 4
-#define INTEL_DSI_COMMAND_MODE 0
-#define INTEL_DSI_VIDEO_MODE 1
+#define INTEL_DSI_VIDEO_MODE 0
+#define INTEL_DSI_COMMAND_MODE 1
struct intel_framebuffer {
struct drm_framebuffer base;
@@ -273,6 +273,13 @@
* accordingly. */
bool has_dp_encoder;
+ /* Whether we should send NULL infoframes. Required for audio. */
+ bool has_hdmi_sink;
+
+ /* Audio enabled on this pipe. Only valid if either has_hdmi_sink or
+ * has_dp_encoder is set. */
+ bool has_audio;
+
/*
* Enable dithering, used when the selected pipe bpp doesn't match the
* plane bpp.
@@ -363,7 +370,6 @@
*/
bool active;
unsigned long enabled_power_domains;
- bool eld_vld;
bool primary_enabled; /* is the primary plane (partially) visible? */
bool lowfreq_avail;
struct intel_overlay *overlay;
@@ -403,6 +409,8 @@
} wm;
wait_queue_head_t vbl_wait;
+
+ int scanline_offset;
};
struct intel_plane_wm_parameters {
@@ -486,6 +494,7 @@
enum hdmi_infoframe_type type,
const void *frame, ssize_t len);
void (*set_infoframes)(struct drm_encoder *encoder,
+ bool enable,
struct drm_display_mode *adjusted_mode);
};
@@ -561,6 +570,7 @@
{
switch (dport->port) {
case PORT_B:
+ case PORT_D:
return DPIO_CH0;
case PORT_C:
return DPIO_CH1;
@@ -569,6 +579,20 @@
}
}
+static inline int
+vlv_pipe_to_channel(enum pipe pipe)
+{
+ switch (pipe) {
+ case PIPE_A:
+ case PIPE_C:
+ return DPIO_CH0;
+ case PIPE_B:
+ return DPIO_CH1;
+ default:
+ BUG();
+ }
+}
+
static inline struct drm_crtc *
intel_get_crtc_for_pipe(struct drm_device *dev, int pipe)
{
@@ -593,6 +617,8 @@
#define INTEL_FLIP_INACTIVE 0
#define INTEL_FLIP_PENDING 1
#define INTEL_FLIP_COMPLETE 2
+ u32 flip_count;
+ u32 gtt_offset;
bool enable_stall_check;
};
@@ -644,8 +670,6 @@
/* i915_irq.c */
bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
enum pipe pipe, bool enable);
-bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
- enum pipe pipe, bool enable);
bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
enum transcoder pch_transcoder,
bool enable);
@@ -653,9 +677,12 @@
void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void bdw_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void bdw_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void intel_runtime_pm_disable_interrupts(struct drm_device *dev);
void intel_runtime_pm_restore_interrupts(struct drm_device *dev);
int intel_get_crtc_scanline(struct intel_crtc *crtc);
+void i9xx_check_fifo_underruns(struct drm_device *dev);
/* intel_crt.c */
@@ -694,7 +721,7 @@
int valleyview_cur_cdclk(struct drm_i915_private *dev_priv);
void intel_mark_busy(struct drm_device *dev);
void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *ring);
+ struct intel_engine_cs *ring);
void intel_mark_idle(struct drm_device *dev);
void intel_crtc_restore_mode(struct drm_crtc *crtc);
void intel_crtc_update_dpms(struct drm_crtc *crtc);
@@ -726,7 +753,7 @@
struct intel_load_detect_pipe *old);
int intel_pin_and_fence_fb_obj(struct drm_device *dev,
struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *pipelined);
+ struct intel_engine_cs *pipelined);
void intel_unpin_fb_obj(struct drm_i915_gem_object *obj);
struct drm_framebuffer *
__intel_framebuffer_create(struct drm_device *dev,
@@ -777,6 +804,8 @@
void intel_mode_from_pipe_config(struct drm_display_mode *mode,
struct intel_crtc_config *pipe_config);
int intel_format_to_fourcc(int format);
+void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc);
+
/* intel_dp.c */
void intel_dp_init(struct drm_device *dev, int output_reg, enum port port);
@@ -902,6 +931,7 @@
/* intel_pm.c */
void intel_init_clock_gating(struct drm_device *dev);
void intel_suspend_hw(struct drm_device *dev);
+int ilk_wm_max_level(const struct drm_device *dev);
void intel_update_watermarks(struct drm_crtc *crtc);
void intel_update_sprite_watermarks(struct drm_plane *plane,
struct drm_crtc *crtc,
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index 4e271c7..2525cdd 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -59,12 +59,12 @@
static inline bool is_vid_mode(struct intel_dsi *intel_dsi)
{
- return intel_dsi->dev.type == INTEL_DSI_VIDEO_MODE;
+ return intel_dsi->operation_mode == INTEL_DSI_VIDEO_MODE;
}
static inline bool is_cmd_mode(struct intel_dsi *intel_dsi)
{
- return intel_dsi->dev.type == INTEL_DSI_COMMAND_MODE;
+ return intel_dsi->operation_mode == INTEL_DSI_COMMAND_MODE;
}
static void intel_dsi_hot_plug(struct intel_encoder *encoder)
@@ -94,13 +94,6 @@
return true;
}
-static void intel_dsi_pre_pll_enable(struct intel_encoder *encoder)
-{
- DRM_DEBUG_KMS("\n");
-
- vlv_enable_dsi_pll(encoder);
-}
-
static void intel_dsi_device_ready(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
@@ -185,6 +178,8 @@
/* put device in ready state */
intel_dsi_device_ready(encoder);
+ msleep(intel_dsi->panel_on_delay);
+
if (intel_dsi->dev.dev_ops->panel_reset)
intel_dsi->dev.dev_ops->panel_reset(&intel_dsi->dev);
@@ -301,6 +296,9 @@
if (intel_dsi->dev.dev_ops->disable_panel_power)
intel_dsi->dev.dev_ops->disable_panel_power(&intel_dsi->dev);
+
+ msleep(intel_dsi->panel_off_delay);
+ msleep(intel_dsi->panel_pwr_cycle_delay);
}
static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
@@ -428,7 +426,7 @@
I915_WRITE(MIPI_VBP_COUNT(pipe), vbp);
}
-static void intel_dsi_mode_set(struct intel_encoder *intel_encoder)
+static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
{
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_device *dev = encoder->dev;
@@ -525,6 +523,9 @@
/* recovery disables */
I915_WRITE(MIPI_EOT_DISABLE(pipe), val);
+ /* in terms of low power clock */
+ I915_WRITE(MIPI_INIT_COUNT(pipe), intel_dsi->init_count);
+
/* in terms of txbyteclkhs. actual high to low switch +
* MIPI_STOP_STATE_STALL * MIPI_LP_BYTECLK.
*
@@ -562,6 +563,15 @@
RANDOM_DPI_DISPLAY_RESOLUTION);
}
+static void intel_dsi_pre_pll_enable(struct intel_encoder *encoder)
+{
+ DRM_DEBUG_KMS("\n");
+
+ intel_dsi_prepare(encoder);
+
+ vlv_enable_dsi_pll(encoder);
+}
+
static enum drm_connector_status
intel_dsi_detect(struct drm_connector *connector, bool force)
{
@@ -639,6 +649,7 @@
struct intel_connector *intel_connector;
struct drm_connector *connector;
struct drm_display_mode *fixed_mode = NULL;
+ struct drm_i915_private *dev_priv = dev->dev_private;
const struct intel_dsi_device *dsi;
unsigned int i;
@@ -658,6 +669,13 @@
encoder = &intel_encoder->base;
intel_dsi->attached_connector = intel_connector;
+ if (IS_VALLEYVIEW(dev)) {
+ dev_priv->mipi_mmio_base = VLV_MIPI_BASE;
+ } else {
+ DRM_ERROR("Unsupported Mipi device to reg base");
+ return false;
+ }
+
connector = &intel_connector->base;
drm_encoder_init(dev, encoder, &intel_dsi_funcs, DRM_MODE_ENCODER_DSI);
@@ -668,7 +686,6 @@
intel_encoder->pre_pll_enable = intel_dsi_pre_pll_enable;
intel_encoder->pre_enable = intel_dsi_pre_enable;
intel_encoder->enable = intel_dsi_enable_nop;
- intel_encoder->mode_set = intel_dsi_mode_set;
intel_encoder->disable = intel_dsi_disable;
intel_encoder->post_disable = intel_dsi_post_disable;
intel_encoder->get_hw_state = intel_dsi_get_hw_state;
diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h
index 550714c..e3f4e91 100644
--- a/drivers/gpu/drm/i915/intel_dsi.h
+++ b/drivers/gpu/drm/i915/intel_dsi.h
@@ -31,7 +31,6 @@
struct intel_dsi_device {
unsigned int panel_id;
const char *name;
- int type;
const struct intel_dsi_dev_ops *dev_ops;
void *dev_priv;
};
@@ -85,6 +84,9 @@
/* virtual channel */
int channel;
+ /* Video mode or command mode */
+ u16 operation_mode;
+
/* number of DSI lanes */
unsigned int lane_count;
@@ -112,6 +114,15 @@
u16 hs_to_lp_count;
u16 clk_lp_to_hs_count;
u16 clk_hs_to_lp_count;
+
+ u16 init_count;
+
+ /* all delays in ms */
+ u16 backlight_off_delay;
+ u16 backlight_on_delay;
+ u16 panel_on_delay;
+ u16 panel_off_delay;
+ u16 panel_pwr_cycle_delay;
};
static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 1604235..a3631c0 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -343,7 +343,7 @@
{
struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
- connector->base.id, drm_get_connector_name(connector));
+ connector->base.id, connector->name);
return intel_dvo->dev.dev_ops->detect(&intel_dvo->dev);
}
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index fce4a0d..e2d4161 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -343,15 +343,15 @@
num_connectors_detected++;
if (!enabled[i]) {
- DRM_DEBUG_KMS("connector %d not enabled, skipping\n",
- connector->base.id);
+ DRM_DEBUG_KMS("connector %s not enabled, skipping\n",
+ connector->name);
continue;
}
encoder = connector->encoder;
if (!encoder || WARN_ON(!encoder->crtc)) {
- DRM_DEBUG_KMS("connector %d has no encoder or crtc, skipping\n",
- connector->base.id);
+ DRM_DEBUG_KMS("connector %s has no encoder or crtc, skipping\n",
+ connector->name);
enabled[i] = false;
continue;
}
@@ -373,16 +373,16 @@
}
}
- DRM_DEBUG_KMS("looking for cmdline mode on connector %d\n",
- fb_conn->connector->base.id);
+ DRM_DEBUG_KMS("looking for cmdline mode on connector %s\n",
+ connector->name);
/* go for command line mode first */
modes[i] = drm_pick_cmdline_mode(fb_conn, width, height);
/* try for preferred next */
if (!modes[i]) {
- DRM_DEBUG_KMS("looking for preferred mode on connector %d\n",
- fb_conn->connector->base.id);
+ DRM_DEBUG_KMS("looking for preferred mode on connector %s\n",
+ connector->name);
modes[i] = drm_has_preferred_mode(fb_conn, width,
height);
}
@@ -400,16 +400,20 @@
* since the fb helper layer wants a pointer to
* something we own.
*/
+ DRM_DEBUG_KMS("looking for current mode on connector %s\n",
+ connector->name);
intel_mode_from_pipe_config(&encoder->crtc->hwmode,
&to_intel_crtc(encoder->crtc)->config);
modes[i] = &encoder->crtc->hwmode;
}
crtcs[i] = new_crtc;
- DRM_DEBUG_KMS("connector %s on crtc %d: %s\n",
- drm_get_connector_name(connector),
+ DRM_DEBUG_KMS("connector %s on pipe %d [CRTC:%d]: %dx%d%s\n",
+ connector->name,
+ pipe_name(to_intel_crtc(encoder->crtc)->pipe),
encoder->crtc->base.id,
- modes[i]->name);
+ modes[i]->hdisplay, modes[i]->vdisplay,
+ modes[i]->flags & DRM_MODE_FLAG_INTERLACE ? "i" : "");
fallback = false;
}
@@ -488,7 +492,7 @@
return false;
/* Find the largest fb */
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ for_each_crtc(dev, crtc) {
intel_crtc = to_intel_crtc(crtc);
if (!intel_crtc->active || !crtc->primary->fb) {
@@ -512,7 +516,7 @@
}
/* Now make sure all the pipes will fit into it */
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ for_each_crtc(dev, crtc) {
unsigned int cur_size;
intel_crtc = to_intel_crtc(crtc);
@@ -577,7 +581,7 @@
drm_framebuffer_reference(&ifbdev->fb->base);
/* Final pass to check if any active pipes don't have fbs */
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ for_each_crtc(dev, crtc) {
intel_crtc = to_intel_crtc(crtc);
if (!intel_crtc->active)
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index b606162..eee2bbe 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -418,6 +418,7 @@
}
static void g4x_set_infoframes(struct drm_encoder *encoder,
+ bool enable,
struct drm_display_mode *adjusted_mode)
{
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
@@ -440,7 +441,7 @@
* either. */
val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC;
- if (!intel_hdmi->has_hdmi_sink) {
+ if (!enable) {
if (!(val & VIDEO_DIP_ENABLE))
return;
val &= ~VIDEO_DIP_ENABLE;
@@ -471,6 +472,7 @@
}
static void ibx_set_infoframes(struct drm_encoder *encoder,
+ bool enable,
struct drm_display_mode *adjusted_mode)
{
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
@@ -486,7 +488,7 @@
/* See the big comment in g4x_set_infoframes() */
val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC;
- if (!intel_hdmi->has_hdmi_sink) {
+ if (!enable) {
if (!(val & VIDEO_DIP_ENABLE))
return;
val &= ~VIDEO_DIP_ENABLE;
@@ -518,6 +520,7 @@
}
static void cpt_set_infoframes(struct drm_encoder *encoder,
+ bool enable,
struct drm_display_mode *adjusted_mode)
{
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
@@ -531,7 +534,7 @@
/* See the big comment in g4x_set_infoframes() */
val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC;
- if (!intel_hdmi->has_hdmi_sink) {
+ if (!enable) {
if (!(val & VIDEO_DIP_ENABLE))
return;
val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI);
@@ -554,6 +557,7 @@
}
static void vlv_set_infoframes(struct drm_encoder *encoder,
+ bool enable,
struct drm_display_mode *adjusted_mode)
{
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
@@ -569,7 +573,7 @@
/* See the big comment in g4x_set_infoframes() */
val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC;
- if (!intel_hdmi->has_hdmi_sink) {
+ if (!enable) {
if (!(val & VIDEO_DIP_ENABLE))
return;
val &= ~VIDEO_DIP_ENABLE;
@@ -601,6 +605,7 @@
}
static void hsw_set_infoframes(struct drm_encoder *encoder,
+ bool enable,
struct drm_display_mode *adjusted_mode)
{
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
@@ -611,7 +616,7 @@
assert_hdmi_port_disabled(intel_hdmi);
- if (!intel_hdmi->has_hdmi_sink) {
+ if (!enable) {
I915_WRITE(reg, 0);
POSTING_READ(reg);
return;
@@ -628,7 +633,7 @@
intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode);
}
-static void intel_hdmi_mode_set(struct intel_encoder *encoder)
+static void intel_hdmi_prepare(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -650,20 +655,21 @@
else
hdmi_val |= SDVO_COLOR_FORMAT_8bpc;
- if (intel_hdmi->has_hdmi_sink &&
- (HAS_PCH_CPT(dev) || IS_VALLEYVIEW(dev)))
+ if (crtc->config.has_hdmi_sink)
hdmi_val |= HDMI_MODE_SELECT_HDMI;
- if (intel_hdmi->has_audio) {
+ if (crtc->config.has_audio) {
+ WARN_ON(!crtc->config.has_hdmi_sink);
DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n",
pipe_name(crtc->pipe));
hdmi_val |= SDVO_AUDIO_ENABLE;
- hdmi_val |= HDMI_MODE_SELECT_HDMI;
intel_write_eld(&encoder->base, adjusted_mode);
}
if (HAS_PCH_CPT(dev))
hdmi_val |= SDVO_PIPE_SEL_CPT(crtc->pipe);
+ else if (IS_CHERRYVIEW(dev))
+ hdmi_val |= SDVO_PIPE_SEL_CHV(crtc->pipe);
else
hdmi_val |= SDVO_PIPE_SEL(crtc->pipe);
@@ -691,6 +697,8 @@
if (HAS_PCH_CPT(dev))
*pipe = PORT_TO_PIPE_CPT(tmp);
+ else if (IS_CHERRYVIEW(dev))
+ *pipe = SDVO_PORT_TO_PIPE_CHV(tmp);
else
*pipe = PORT_TO_PIPE(tmp);
@@ -717,6 +725,12 @@
else
flags |= DRM_MODE_FLAG_NVSYNC;
+ if (tmp & HDMI_MODE_SELECT_HDMI)
+ pipe_config->has_hdmi_sink = true;
+
+ if (tmp & SDVO_AUDIO_ENABLE)
+ pipe_config->has_audio = true;
+
pipe_config->adjusted_mode.flags |= flags;
if ((tmp & SDVO_COLOR_FORMAT_MASK) == HDMI_COLOR_FORMAT_12bpc)
@@ -739,7 +753,7 @@
u32 temp;
u32 enable_bits = SDVO_ENABLE;
- if (intel_hdmi->has_audio)
+ if (intel_crtc->config.has_audio)
enable_bits |= SDVO_AUDIO_ENABLE;
temp = I915_READ(intel_hdmi->hdmi_reg);
@@ -893,9 +907,11 @@
int portclock_limit = hdmi_portclock_limit(intel_hdmi, false);
int desired_bpp;
+ pipe_config->has_hdmi_sink = intel_hdmi->has_hdmi_sink;
+
if (intel_hdmi->color_range_auto) {
/* See CEA-861-E - 5.1 Default Encoding Parameters */
- if (intel_hdmi->has_hdmi_sink &&
+ if (pipe_config->has_hdmi_sink &&
drm_match_cea_mode(adjusted_mode) > 1)
intel_hdmi->color_range = HDMI_COLOR_RANGE_16_235;
else
@@ -908,13 +924,16 @@
if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev))
pipe_config->has_pch_encoder = true;
+ if (pipe_config->has_hdmi_sink && intel_hdmi->has_audio)
+ pipe_config->has_audio = true;
+
/*
* HDMI is either 12 or 8, so if the display lets 10bpc sneak
* through, clamp it down. Note that g4x/vlv don't support 12bpc hdmi
* outputs. We also need to check that the higher clock still fits
* within limits.
*/
- if (pipe_config->pipe_bpp > 8*3 && intel_hdmi->has_hdmi_sink &&
+ if (pipe_config->pipe_bpp > 8*3 && pipe_config->has_hdmi_sink &&
clock_12bpc <= portclock_limit &&
hdmi_12bpc_possible(encoder->new_crtc)) {
DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n");
@@ -954,7 +973,7 @@
enum drm_connector_status status = connector_status_disconnected;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
- connector->base.id, drm_get_connector_name(connector));
+ connector->base.id, connector->name);
power_domain = intel_display_port_power_domain(intel_encoder);
intel_display_power_get(dev_priv, power_domain);
@@ -1121,7 +1140,11 @@
struct drm_display_mode *adjusted_mode =
&intel_crtc->config.adjusted_mode;
- intel_hdmi->set_infoframes(&encoder->base, adjusted_mode);
+ intel_hdmi_prepare(encoder);
+
+ intel_hdmi->set_infoframes(&encoder->base,
+ intel_crtc->config.has_hdmi_sink,
+ adjusted_mode);
}
static void vlv_hdmi_pre_enable(struct intel_encoder *encoder)
@@ -1138,9 +1161,6 @@
int pipe = intel_crtc->pipe;
u32 val;
- if (!IS_VALLEYVIEW(dev))
- return;
-
/* Enable clock channels for this port */
mutex_lock(&dev_priv->dpio_lock);
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
@@ -1167,7 +1187,9 @@
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
mutex_unlock(&dev_priv->dpio_lock);
- intel_hdmi->set_infoframes(&encoder->base, adjusted_mode);
+ intel_hdmi->set_infoframes(&encoder->base,
+ intel_crtc->config.has_hdmi_sink,
+ adjusted_mode);
intel_enable_hdmi(encoder);
@@ -1184,8 +1206,7 @@
enum dpio_channel port = vlv_dport_to_channel(dport);
int pipe = intel_crtc->pipe;
- if (!IS_VALLEYVIEW(dev))
- return;
+ intel_hdmi_prepare(encoder);
/* Program Tx lane resets to default */
mutex_lock(&dev_priv->dpio_lock);
@@ -1224,6 +1245,152 @@
mutex_unlock(&dev_priv->dpio_lock);
}
+static void chv_hdmi_post_disable(struct intel_encoder *encoder)
+{
+ struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(encoder->base.crtc);
+ enum dpio_channel ch = vlv_dport_to_channel(dport);
+ enum pipe pipe = intel_crtc->pipe;
+ u32 val;
+
+ mutex_lock(&dev_priv->dpio_lock);
+
+ /* Propagate soft reset to data lane reset */
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
+ val |= CHV_PCS_REQ_SOFTRESET_EN;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
+ val |= CHV_PCS_REQ_SOFTRESET_EN;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
+ val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
+ val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
+
+ mutex_unlock(&dev_priv->dpio_lock);
+}
+
+static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
+{
+ struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(encoder->base.crtc);
+ enum dpio_channel ch = vlv_dport_to_channel(dport);
+ int pipe = intel_crtc->pipe;
+ int data, i;
+ u32 val;
+
+ mutex_lock(&dev_priv->dpio_lock);
+
+ /* Deassert soft data lane reset */
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
+ val |= CHV_PCS_REQ_SOFTRESET_EN;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
+ val |= CHV_PCS_REQ_SOFTRESET_EN;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
+ val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
+ val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
+
+ /* Program Tx latency optimal setting */
+ for (i = 0; i < 4; i++) {
+ /* Set the latency optimal bit */
+ data = (i == 1) ? 0x0 : 0x6;
+ vlv_dpio_write(dev_priv, pipe, CHV_TX_DW11(ch, i),
+ data << DPIO_FRC_LATENCY_SHFIT);
+
+ /* Set the upar bit */
+ data = (i == 1) ? 0x0 : 0x1;
+ vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
+ data << DPIO_UPAR_SHIFT);
+ }
+
+ /* Data lane stagger programming */
+ /* FIXME: Fix up value only after power analysis */
+
+ /* Clear calc init */
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
+ val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
+ val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
+
+ /* FIXME: Program the support xxx V-dB */
+ /* Use 800mV-0dB */
+ for (i = 0; i < 4; i++) {
+ val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
+ val &= ~DPIO_SWING_DEEMPH9P5_MASK;
+ val |= 128 << DPIO_SWING_DEEMPH9P5_SHIFT;
+ vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
+ }
+
+ for (i = 0; i < 4; i++) {
+ val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
+ val &= ~DPIO_SWING_MARGIN_MASK;
+ val |= 102 << DPIO_SWING_MARGIN_SHIFT;
+ vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
+ }
+
+ /* Disable unique transition scale */
+ for (i = 0; i < 4; i++) {
+ val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
+ val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
+ vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
+ }
+
+ /* Additional steps for 1200mV-0dB */
+#if 0
+ val = vlv_dpio_read(dev_priv, pipe, VLV_TX_DW3(ch));
+ if (ch)
+ val |= DPIO_TX_UNIQ_TRANS_SCALE_CH1;
+ else
+ val |= DPIO_TX_UNIQ_TRANS_SCALE_CH0;
+ vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(ch), val);
+
+ vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(ch),
+ vlv_dpio_read(dev_priv, pipe, VLV_TX_DW2(ch)) |
+ (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT));
+#endif
+ /* Start swing calculation */
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
+ val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
+ val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
+
+ /* LRC Bypass */
+ val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
+ val |= DPIO_LRC_BYPASS;
+ vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);
+
+ mutex_unlock(&dev_priv->dpio_lock);
+
+ intel_enable_hdmi(encoder);
+
+ vlv_wait_port_ready(dev_priv, dport);
+}
+
static void intel_hdmi_destroy(struct drm_connector *connector)
{
drm_connector_cleanup(connector);
@@ -1284,7 +1451,10 @@
intel_encoder->hpd_pin = HPD_PORT_C;
break;
case PORT_D:
- intel_hdmi->ddc_bus = GMBUS_PORT_DPD;
+ if (IS_CHERRYVIEW(dev))
+ intel_hdmi->ddc_bus = GMBUS_PORT_DPD_CHV;
+ else
+ intel_hdmi->ddc_bus = GMBUS_PORT_DPD;
intel_encoder->hpd_pin = HPD_PORT_D;
break;
case PORT_A:
@@ -1354,11 +1524,14 @@
DRM_MODE_ENCODER_TMDS);
intel_encoder->compute_config = intel_hdmi_compute_config;
- intel_encoder->mode_set = intel_hdmi_mode_set;
intel_encoder->disable = intel_disable_hdmi;
intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
intel_encoder->get_config = intel_hdmi_get_config;
- if (IS_VALLEYVIEW(dev)) {
+ if (IS_CHERRYVIEW(dev)) {
+ intel_encoder->pre_enable = chv_hdmi_pre_enable;
+ intel_encoder->enable = vlv_enable_hdmi;
+ intel_encoder->post_disable = chv_hdmi_post_disable;
+ } else if (IS_VALLEYVIEW(dev)) {
intel_encoder->pre_pll_enable = vlv_hdmi_pre_pll_enable;
intel_encoder->pre_enable = vlv_hdmi_pre_enable;
intel_encoder->enable = vlv_enable_hdmi;
@@ -1369,7 +1542,14 @@
}
intel_encoder->type = INTEL_OUTPUT_HDMI;
- intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+ if (IS_CHERRYVIEW(dev)) {
+ if (port == PORT_D)
+ intel_encoder->crtc_mask = 1 << 2;
+ else
+ intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
+ } else {
+ intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+ }
intel_encoder->cloneable = 1 << INTEL_OUTPUT_ANALOG;
/*
* BSpec is unclear about HDMI+HDMI cloning on g4x, but it seems
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 1b1541d..2312602 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -119,10 +119,6 @@
pipe_config->adjusted_mode.crtc_clock = dotclock;
}
-/* The LVDS pin pair needs to be on before the DPLLs are enabled.
- * This is an exception to the general rule that mode_set doesn't turn
- * things on.
- */
static void intel_pre_enable_lvds(struct intel_encoder *encoder)
{
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
@@ -324,15 +320,6 @@
return true;
}
-static void intel_lvds_mode_set(struct intel_encoder *encoder)
-{
- /*
- * We don't do anything here, the LVDS port is fully set up in the pre
- * enable hook - the ordering constraints for enabling the lvds port vs.
- * enabling the display pll are too strict.
- */
-}
-
/**
* Detect the LVDS connection.
*
@@ -347,7 +334,7 @@
enum drm_connector_status status;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
- connector->base.id, drm_get_connector_name(connector));
+ connector->base.id, connector->name);
status = intel_panel_detect(dev);
if (status != connector_status_unknown)
@@ -946,7 +933,6 @@
intel_encoder->enable = intel_enable_lvds;
intel_encoder->pre_enable = intel_pre_enable_lvds;
intel_encoder->compute_config = intel_lvds_compute_config;
- intel_encoder->mode_set = intel_lvds_mode_set;
intel_encoder->disable = intel_disable_lvds;
intel_encoder->get_hw_state = intel_lvds_get_hw_state;
intel_encoder->get_config = intel_lvds_get_config;
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index acde294..b812e9d 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -410,7 +410,7 @@
if (bclp > 255)
return ASLC_BACKLIGHT_FAILED;
- mutex_lock(&dev->mode_config.mutex);
+ mutex_lock(&dev->mode_config.connection_mutex);
/*
* Update backlight on all connectors that support backlight (usually
@@ -421,7 +421,7 @@
intel_panel_set_backlight(intel_connector, bclp, 255);
iowrite32(DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID, &asle->cblv);
- mutex_unlock(&dev->mode_config.mutex);
+ mutex_unlock(&dev->mode_config.connection_mutex);
return 0;
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index d8adc91..e97ea33 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -213,7 +213,7 @@
{
struct drm_device *dev = overlay->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+ struct intel_engine_cs *ring = &dev_priv->ring[RCS];
int ret;
BUG_ON(overlay->last_flip_req);
@@ -236,7 +236,7 @@
{
struct drm_device *dev = overlay->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+ struct intel_engine_cs *ring = &dev_priv->ring[RCS];
int ret;
BUG_ON(overlay->active);
@@ -263,7 +263,7 @@
{
struct drm_device *dev = overlay->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+ struct intel_engine_cs *ring = &dev_priv->ring[RCS];
u32 flip_addr = overlay->flip_addr;
u32 tmp;
int ret;
@@ -320,7 +320,7 @@
{
struct drm_device *dev = overlay->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+ struct intel_engine_cs *ring = &dev_priv->ring[RCS];
u32 flip_addr = overlay->flip_addr;
int ret;
@@ -363,7 +363,7 @@
{
struct drm_device *dev = overlay->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+ struct intel_engine_cs *ring = &dev_priv->ring[RCS];
int ret;
if (overlay->last_flip_req == 0)
@@ -389,7 +389,7 @@
{
struct drm_device *dev = overlay->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+ struct intel_engine_cs *ring = &dev_priv->ring[RCS];
int ret;
/* Only wait if there is actually an old frame to release to
@@ -688,7 +688,7 @@
u32 swidth, swidthsw, sheight, ostride;
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
- BUG_ON(!mutex_is_locked(&dev->mode_config.mutex));
+ BUG_ON(!mutex_is_locked(&dev->mode_config.connection_mutex));
BUG_ON(!overlay);
ret = intel_overlay_release_old_vid(overlay);
@@ -793,7 +793,7 @@
int ret;
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
- BUG_ON(!mutex_is_locked(&dev->mode_config.mutex));
+ BUG_ON(!mutex_is_locked(&dev->mode_config.connection_mutex));
ret = intel_overlay_recover_from_interrupt(overlay);
if (ret != 0)
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 44ad415..d4d4156 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -42,6 +42,59 @@
drm_mode_set_crtcinfo(adjusted_mode, 0);
}
+/**
+ * intel_find_panel_downclock - find the reduced downclock for LVDS in EDID
+ * @dev: drm device
+ * @fixed_mode: panel native mode
+ * @connector: LVDS/eDP connector
+ *
+ * Finds the reduced downclock for LVDS/eDP among the EDID-probed modes.
+ * Returns a duplicate of that mode, or NULL if none is available.
+ */
+struct drm_display_mode *
+intel_find_panel_downclock(struct drm_device *dev,
+ struct drm_display_mode *fixed_mode,
+ struct drm_connector *connector)
+{
+ struct drm_display_mode *scan, *tmp_mode;
+ int temp_downclock;
+
+ temp_downclock = fixed_mode->clock;
+ tmp_mode = NULL;
+
+ list_for_each_entry(scan, &connector->probed_modes, head) {
+ /*
+ * If a mode has the same resolution as the fixed_panel mode but
+ * a different refresh rate, we have found a reduced downclock.
+ * In that case we can program different FPx0/1 values to
+ * dynamically select between the low and high frequency.
+ */
+ if (scan->hdisplay == fixed_mode->hdisplay &&
+ scan->hsync_start == fixed_mode->hsync_start &&
+ scan->hsync_end == fixed_mode->hsync_end &&
+ scan->htotal == fixed_mode->htotal &&
+ scan->vdisplay == fixed_mode->vdisplay &&
+ scan->vsync_start == fixed_mode->vsync_start &&
+ scan->vsync_end == fixed_mode->vsync_end &&
+ scan->vtotal == fixed_mode->vtotal) {
+ if (scan->clock < temp_downclock) {
+ /*
+ * A downclock was already found, but keep
+ * looking for an even lower one.
+ */
+ temp_downclock = scan->clock;
+ tmp_mode = scan;
+ }
+ }
+ }
+
+ if (temp_downclock < fixed_mode->clock)
+ return drm_mode_duplicate(dev, tmp_mode);
+ else
+ return NULL;
+}
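The matching rule above is strict: a probed mode qualifies as a downclock candidate only if all eight horizontal and vertical timing fields are identical, so the pixel clock (and hence the refresh rate) is the only thing allowed to differ. A standalone sketch of the predicate, using a cut-down stand-in for drm_display_mode:

/* Downclock-candidate test (sketch); struct mode is a simplified
 * stand-in for drm_display_mode, clock in kHz. */
struct mode {
	int clock;
	int hdisplay, hsync_start, hsync_end, htotal;
	int vdisplay, vsync_start, vsync_end, vtotal;
};

static int same_timings(const struct mode *a, const struct mode *b)
{
	return a->hdisplay    == b->hdisplay    &&
	       a->hsync_start == b->hsync_start &&
	       a->hsync_end   == b->hsync_end   &&
	       a->htotal      == b->htotal      &&
	       a->vdisplay    == b->vdisplay    &&
	       a->vsync_start == b->vsync_start &&
	       a->vsync_end   == b->vsync_end   &&
	       a->vtotal      == b->vtotal;
}

For example, a hypothetical panel listing 1920x1080 with htotal 2200 and vtotal 1125 at both 148500 kHz (60 Hz) and 99000 kHz (40 Hz) passes same_timings() for that pair, so the 99000 kHz entry would be returned as the reduced downclock.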
+
/* adjusted_mode has been preset to be the panel's fixed mode */
void
intel_pch_panel_fitting(struct intel_crtc *intel_crtc,
@@ -323,6 +376,28 @@
pipe_config->gmch_pfit.lvds_border_bits = border;
}
+enum drm_connector_status
+intel_panel_detect(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /* Assume that the BIOS does not lie through the OpRegion... */
+ if (!i915.panel_ignore_lid && dev_priv->opregion.lid_state) {
+ return ioread32(dev_priv->opregion.lid_state) & 0x1 ?
+ connector_status_connected :
+ connector_status_disconnected;
+ }
+
+ switch (i915.panel_ignore_lid) {
+ case -2:
+ return connector_status_connected;
+ case -1:
+ return connector_status_disconnected;
+ default:
+ return connector_status_unknown;
+ }
+}
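intel_panel_detect() layers the i915.panel_ignore_lid parameter on top of the ACPI lid state: 0 trusts the lid bit when the OpRegion publishes one, -2 forces connected, -1 forces disconnected, and any other value reports unknown. A table-style sketch of that decision, with the OpRegion read stubbed out by a hypothetical read_lid_state():

/* Lid/parameter decision of intel_panel_detect() (sketch).
 * read_lid_state() stands in for the ioread32() of the OpRegion
 * lid field; bit 0 set means the lid is open. */
enum status { DISCONNECTED, CONNECTED, UNKNOWN };

static int read_lid_state(int *open)
{
	*open = 1;	/* pretend the lid is open */
	return 0;	/* assumed: 0 when the OpRegion field exists */
}

static enum status panel_detect(int panel_ignore_lid)
{
	int open;

	if (panel_ignore_lid == 0 && read_lid_state(&open) == 0)
		return open ? CONNECTED : DISCONNECTED;

	switch (panel_ignore_lid) {
	case -2: return CONNECTED;	/* force "lid open" */
	case -1: return DISCONNECTED;	/* force "lid closed" */
	default: return UNKNOWN;	/* no usable lid information */
	}
}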
+
static u32 intel_panel_compute_brightness(struct intel_connector *connector,
u32 val)
{
@@ -795,40 +870,18 @@
spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
}
-enum drm_connector_status
-intel_panel_detect(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- /* Assume that the BIOS does not lie through the OpRegion... */
- if (!i915.panel_ignore_lid && dev_priv->opregion.lid_state) {
- return ioread32(dev_priv->opregion.lid_state) & 0x1 ?
- connector_status_connected :
- connector_status_disconnected;
- }
-
- switch (i915.panel_ignore_lid) {
- case -2:
- return connector_status_connected;
- case -1:
- return connector_status_disconnected;
- default:
- return connector_status_unknown;
- }
-}
-
#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
static int intel_backlight_device_update_status(struct backlight_device *bd)
{
struct intel_connector *connector = bl_get_data(bd);
struct drm_device *dev = connector->base.dev;
- mutex_lock(&dev->mode_config.mutex);
+ mutex_lock(&dev->mode_config.connection_mutex);
DRM_DEBUG_KMS("updating intel_backlight, brightness=%d/%d\n",
bd->props.brightness, bd->props.max_brightness);
intel_panel_set_backlight(connector, bd->props.brightness,
bd->props.max_brightness);
- mutex_unlock(&dev->mode_config.mutex);
+ mutex_unlock(&dev->mode_config.connection_mutex);
return 0;
}
@@ -840,9 +893,9 @@
int ret;
intel_runtime_pm_get(dev_priv);
- mutex_lock(&dev->mode_config.mutex);
+ mutex_lock(&dev->mode_config.connection_mutex);
ret = intel_panel_get_backlight(connector);
- mutex_unlock(&dev->mode_config.mutex);
+ mutex_unlock(&dev->mode_config.connection_mutex);
intel_runtime_pm_put(dev_priv);
return ret;
@@ -1077,7 +1130,7 @@
if (ret) {
DRM_DEBUG_KMS("failed to setup backlight for connector %s\n",
- drm_get_connector_name(connector));
+ connector->name);
return ret;
}
@@ -1103,59 +1156,6 @@
intel_backlight_device_unregister(intel_connector);
}
-/**
- * intel_find_panel_downclock - find the reduced downclock for LVDS in EDID
- * @dev: drm device
- * @fixed_mode : panel native mode
- * @connector: LVDS/eDP connector
- *
- * Return downclock_avail
- * Find the reduced downclock for LVDS/eDP in EDID.
- */
-struct drm_display_mode *
-intel_find_panel_downclock(struct drm_device *dev,
- struct drm_display_mode *fixed_mode,
- struct drm_connector *connector)
-{
- struct drm_display_mode *scan, *tmp_mode;
- int temp_downclock;
-
- temp_downclock = fixed_mode->clock;
- tmp_mode = NULL;
-
- list_for_each_entry(scan, &connector->probed_modes, head) {
- /*
- * If one mode has the same resolution with the fixed_panel
- * mode while they have the different refresh rate, it means
- * that the reduced downclock is found. In such
- * case we can set the different FPx0/1 to dynamically select
- * between low and high frequency.
- */
- if (scan->hdisplay == fixed_mode->hdisplay &&
- scan->hsync_start == fixed_mode->hsync_start &&
- scan->hsync_end == fixed_mode->hsync_end &&
- scan->htotal == fixed_mode->htotal &&
- scan->vdisplay == fixed_mode->vdisplay &&
- scan->vsync_start == fixed_mode->vsync_start &&
- scan->vsync_end == fixed_mode->vsync_end &&
- scan->vtotal == fixed_mode->vtotal) {
- if (scan->clock < temp_downclock) {
- /*
- * The downclock is already found. But we
- * expect to find the lower downclock.
- */
- temp_downclock = scan->clock;
- tmp_mode = scan;
- }
- }
- }
-
- if (temp_downclock < fixed_mode->clock)
- return drm_mode_duplicate(dev, tmp_mode);
- else
- return NULL;
-}
-
/* Set up chip specific backlight functions */
void intel_panel_init_backlight_funcs(struct drm_device *dev)
{
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 834c49c..b86b58c 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -487,7 +487,7 @@
* - new fb is too large to fit in compressed buffer
* - going to an unsupported config (interlace, pixel multiply, etc.)
*/
- list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
+ for_each_crtc(dev, tmp_crtc) {
if (intel_crtc_active(tmp_crtc) &&
to_intel_crtc(tmp_crtc)->primary_enabled) {
if (crtc) {
@@ -1010,7 +1010,7 @@
{
struct drm_crtc *crtc, *enabled = NULL;
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ for_each_crtc(dev, crtc) {
if (intel_crtc_active(crtc)) {
if (enabled)
return NULL;
@@ -2077,7 +2077,7 @@
wm[3] *= 2;
}
-static int ilk_wm_max_level(const struct drm_device *dev)
+int ilk_wm_max_level(const struct drm_device *dev)
{
/* how many WM levels are we expecting */
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
@@ -2170,7 +2170,7 @@
struct intel_crtc *intel_crtc;
/* Compute the currently _active_ config */
- list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) {
+ for_each_intel_crtc(dev, intel_crtc) {
const struct intel_pipe_wm *wm = &intel_crtc->wm.active;
if (!wm->pipe_enabled)
@@ -2254,7 +2254,7 @@
ret_wm->enable = true;
- list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) {
+ for_each_intel_crtc(dev, intel_crtc) {
const struct intel_pipe_wm *active = &intel_crtc->wm.active;
const struct intel_wm_level *wm = &active->wm[level];
@@ -2400,7 +2400,7 @@
}
/* LP0 register values */
- list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) {
+ for_each_intel_crtc(dev, intel_crtc) {
enum pipe pipe = intel_crtc->pipe;
const struct intel_wm_level *r =
&intel_crtc->wm.active.wm[0];
@@ -2747,7 +2747,7 @@
struct ilk_wm_values *hw = &dev_priv->wm.hw;
struct drm_crtc *crtc;
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+ for_each_crtc(dev, crtc)
ilk_pipe_wm_get_hw_state(crtc);
hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
@@ -3114,6 +3114,9 @@
if (INTEL_INFO(dev_priv->dev)->gen <= 7 && !IS_HASWELL(dev_priv->dev))
mask |= GEN6_PM_RP_UP_EI_EXPIRED;
+ if (IS_GEN8(dev_priv->dev))
+ mask |= GEN8_PMINTR_REDIRECT_TO_NON_DISP;
+
return ~mask;
}
@@ -3246,6 +3249,26 @@
trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv, val));
}
+static void gen8_disable_rps_interrupts(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(GEN6_PMINTRMSK, ~GEN8_PMINTR_REDIRECT_TO_NON_DISP);
+ I915_WRITE(GEN8_GT_IER(2), I915_READ(GEN8_GT_IER(2)) &
+ ~dev_priv->pm_rps_events);
+ /* Fully masking the PM interrupts here cannot race with the RPS work
+ * item re-unmasking them, because the work item masks PM interrupts
+ * through a different register (GEN8_GT_IMR(2)). The only risk is
+ * leaving stale bits in GEN8_GT_IIR(2) and GEN8_GT_IMR(2), which
+ * gen8_enable_rps will clean up. */
+
+ spin_lock_irq(&dev_priv->irq_lock);
+ dev_priv->rps.pm_iir = 0;
+ spin_unlock_irq(&dev_priv->irq_lock);
+
+ I915_WRITE(GEN8_GT_IIR(2), dev_priv->pm_rps_events);
+}
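The disable path above is ordered on purpose: mask the events at GEN6_PMINTRMSK and in the IER first, then clear the software-tracked rps.pm_iir under the irq lock, and only then ack whatever is still latched in the IIR. A generic mask/clear/ack sketch of that shape; reg_read, reg_write and the register indices are hypothetical:

/* Generic mask -> clear book-keeping -> ack pattern (sketch). */
#include <stdint.h>

static uint32_t regs[4];	/* fake register file for the sketch */
static uint32_t reg_read(int reg)            { return regs[reg]; }
static void reg_write(int reg, uint32_t val) { regs[reg] = val; }

enum { INTR_MASK, INTR_IER, INTR_IIR };

static uint32_t pending;	/* software copy of pending events */

static void disable_events(uint32_t events)
{
	/* 1) Mask the source so no new interrupts fire. */
	reg_write(INTR_MASK, reg_read(INTR_MASK) | events);
	reg_write(INTR_IER, reg_read(INTR_IER) & ~events);

	/* 2) Drop anything the bottom half still believes is pending
	 *    (done under the irq lock in the real code). */
	pending = 0;

	/* 3) Ack stale latched bits so a later re-enable starts clean. */
	reg_write(INTR_IIR, events);
}

Acking the IIR last means an interrupt that raced in before step 1 completed is also cleared, rather than left to fire spuriously after a re-enable.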
+
static void gen6_disable_rps_interrupts(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3272,7 +3295,10 @@
I915_WRITE(GEN6_RC_CONTROL, 0);
I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
- gen6_disable_rps_interrupts(dev);
+ if (IS_BROADWELL(dev))
+ gen8_disable_rps_interrupts(dev);
+ else
+ gen6_disable_rps_interrupts(dev);
}
static void valleyview_disable_rps(struct drm_device *dev)
@@ -3308,10 +3334,6 @@
if (INTEL_INFO(dev)->gen == 5 && !IS_IRONLAKE_M(dev))
return 0;
- /* Disable RC6 on Broadwell for now */
- if (IS_BROADWELL(dev))
- return 0;
-
/* Respect the kernel parameter if it is set */
if (enable_rc6 >= 0) {
int mask;
@@ -3324,7 +3346,7 @@
if ((enable_rc6 & mask) != enable_rc6)
DRM_INFO("Adjusting RC6 mask to %d (requested %d, valid %d)\n",
- enable_rc6, enable_rc6 & mask, mask);
+ enable_rc6 & mask, enable_rc6, mask);
return enable_rc6 & mask;
}
@@ -3344,6 +3366,17 @@
return i915.enable_rc6;
}
+static void gen8_enable_rps_interrupts(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ spin_lock_irq(&dev_priv->irq_lock);
+ WARN_ON(dev_priv->rps.pm_iir);
+ bdw_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+ I915_WRITE(GEN8_GT_IIR(2), dev_priv->pm_rps_events);
+ spin_unlock_irq(&dev_priv->irq_lock);
+}
+
static void gen6_enable_rps_interrupts(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3379,7 +3412,7 @@
static void gen8_enable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
+ struct intel_engine_cs *ring;
uint32_t rc6_mask = 0, rp_state_cap;
int unused;
@@ -3433,11 +3466,15 @@
I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
+ /* WaDisablePwrmtrEvent:chv (pre-production hw) */
+ I915_WRITE(0xA80C, I915_READ(0xA80C) & 0x00ffffff);
+ I915_WRITE(0xA810, I915_READ(0xA810) & 0xffffff00);
+
/* 5: Enable RPS */
I915_WRITE(GEN6_RP_CONTROL,
GEN6_RP_MEDIA_TURBO |
GEN6_RP_MEDIA_HW_NORMAL_MODE |
- GEN6_RP_MEDIA_IS_GFX |
+ GEN6_RP_MEDIA_IS_GFX | /* WaSetMaskForGfxBusyness:chv (pre-production hw?) */
GEN6_RP_ENABLE |
GEN6_RP_UP_BUSY_AVG |
GEN6_RP_DOWN_IDLE_AVG);
@@ -3446,7 +3483,7 @@
gen6_set_rps(dev, (I915_READ(GEN6_GT_PERF_STATUS) & 0xff00) >> 8);
- gen6_enable_rps_interrupts(dev);
+ gen8_enable_rps_interrupts(dev);
gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
}
@@ -3454,7 +3491,7 @@
static void gen6_enable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
+ struct intel_engine_cs *ring;
u32 rp_state_cap;
u32 gt_perf_status;
u32 rc6vids, pcu_mbox = 0, rc6_mask = 0;
@@ -3783,7 +3820,7 @@
static void valleyview_enable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
+ struct intel_engine_cs *ring;
u32 gtfifodbg, val, rc6_mode = 0;
int i;
@@ -3914,7 +3951,7 @@
static void ironlake_enable_rc6(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+ struct intel_engine_cs *ring = &dev_priv->ring[RCS];
bool was_interruptible;
int ret;
@@ -4426,7 +4463,7 @@
bool i915_gpu_busy(void)
{
struct drm_i915_private *dev_priv;
- struct intel_ring_buffer *ring;
+ struct intel_engine_cs *ring;
bool ret = false;
int i;
@@ -4608,8 +4645,10 @@
if (IS_IRONLAKE_M(dev)) {
ironlake_disable_drps(dev);
ironlake_disable_rc6(dev);
- } else if (IS_GEN6(dev) || IS_GEN7(dev)) {
- cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
+ } else if (IS_GEN6(dev) || IS_GEN7(dev) || IS_BROADWELL(dev)) {
+ if (cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work))
+ intel_runtime_pm_put(dev_priv);
+
cancel_work_sync(&dev_priv->rps.work);
mutex_lock(&dev_priv->rps.hw_lock);
if (IS_VALLEYVIEW(dev))
@@ -4655,7 +4694,7 @@
ironlake_enable_rc6(dev);
intel_init_emon(dev);
mutex_unlock(&dev->struct_mutex);
- } else if (IS_GEN6(dev) || IS_GEN7(dev)) {
+ } else if (IS_GEN6(dev) || IS_GEN7(dev) || IS_BROADWELL(dev)) {
/*
* PCU communication is slow and this doesn't need to be
* done at any specific time, so do this out of our fast path
@@ -5335,6 +5374,59 @@
I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
}
+static void cherryview_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
+
+ I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
+
+ /* WaDisablePartialInstShootdown:chv */
+ I915_WRITE(GEN8_ROW_CHICKEN,
+ _MASKED_BIT_ENABLE(PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE));
+
+ /* WaDisableThreadStallDopClockGating:chv */
+ I915_WRITE(GEN8_ROW_CHICKEN,
+ _MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE));
+
+ /* WaVSRefCountFullforceMissDisable:chv */
+ /* WaDSRefCountFullforceMissDisable:chv */
+ I915_WRITE(GEN7_FF_THREAD_MODE,
+ I915_READ(GEN7_FF_THREAD_MODE) &
+ ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
+
+ /* WaDisableSemaphoreAndSyncFlipWait:chv */
+ I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
+ _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
+
+ /* WaDisableCSUnitClockGating:chv */
+ I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
+ GEN6_CSUNIT_CLOCK_GATE_DISABLE);
+
+ /* WaDisableSDEUnitClockGating:chv */
+ I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
+ GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
+
+ /* WaDisableSamplerPowerBypass:chv (pre-production hw) */
+ I915_WRITE(HALF_SLICE_CHICKEN3,
+ _MASKED_BIT_ENABLE(GEN8_SAMPLER_POWER_BYPASS_DIS));
+
+ /* WaDisableGunitClockGating:chv (pre-production hw) */
+ I915_WRITE(VLV_GUNIT_CLOCK_GATE, I915_READ(VLV_GUNIT_CLOCK_GATE) |
+ GINT_DIS);
+
+ /* WaDisableFfDopClockGating:chv (pre-production hw) */
+ I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
+ _MASKED_BIT_ENABLE(GEN8_FF_DOP_CLOCK_GATE_DISABLE));
+
+ /* WaDisableDopClockGating:chv (pre-production hw) */
+ I915_WRITE(GEN7_ROW_CHICKEN2,
+ _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
+ I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
+ GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE);
+}
+
static void g4x_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -5545,33 +5637,6 @@
}
}
-static void reset_vblank_counter(struct drm_device *dev, enum pipe pipe)
-{
- assert_spin_locked(&dev->vbl_lock);
-
- dev->vblank[pipe].last = 0;
-}
-
-static void hsw_power_well_post_disable(struct drm_i915_private *dev_priv)
-{
- struct drm_device *dev = dev_priv->dev;
- enum pipe pipe;
- unsigned long irqflags;
-
- /*
- * After this, the registers on the pipes that are part of the power
- * well will become zero, so we have to adjust our counters according to
- * that.
- *
- * FIXME: Should we do this in general in drm_vblank_post_modeset?
- */
- spin_lock_irqsave(&dev->vbl_lock, irqflags);
- for_each_pipe(pipe)
- if (pipe != PIPE_A)
- reset_vblank_counter(dev, pipe);
- spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
-}
-
static void hsw_set_power_well(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well, bool enable)
{
@@ -5600,8 +5665,6 @@
I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
POSTING_READ(HSW_PWR_WELL_DRIVER);
DRM_DEBUG_KMS("Requesting to disable the power well\n");
-
- hsw_power_well_post_disable(dev_priv);
}
}
}
@@ -5758,23 +5821,12 @@
static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- struct drm_device *dev = dev_priv->dev;
- enum pipe pipe;
-
WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
spin_lock_irq(&dev_priv->irq_lock);
- for_each_pipe(pipe)
- __intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);
-
valleyview_disable_display_irqs(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
- spin_lock_irq(&dev->vbl_lock);
- for_each_pipe(pipe)
- reset_vblank_counter(dev, pipe);
- spin_unlock_irq(&dev->vbl_lock);
-
vlv_set_power_well(dev_priv, power_well, false);
}
@@ -6270,6 +6322,10 @@
dev_priv->display.init_clock_gating = haswell_init_clock_gating;
else if (INTEL_INFO(dev)->gen == 8)
dev_priv->display.init_clock_gating = gen8_init_clock_gating;
+ } else if (IS_CHERRYVIEW(dev)) {
+ dev_priv->display.update_wm = valleyview_update_wm;
+ dev_priv->display.init_clock_gating =
+ cherryview_init_clock_gating;
} else if (IS_VALLEYVIEW(dev)) {
dev_priv->display.update_wm = valleyview_update_wm;
dev_priv->display.init_clock_gating =
diff --git a/drivers/gpu/drm/i915/intel_renderstate.h b/drivers/gpu/drm/i915/intel_renderstate.h
new file mode 100644
index 0000000..a5e783a
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_renderstate.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _INTEL_RENDERSTATE_H
+#define _INTEL_RENDERSTATE_H
+
+#include <linux/types.h>
+
+struct intel_renderstate_rodata {
+ const u32 *reloc;
+ const u32 reloc_items;
+ const u32 *batch;
+ const u32 batch_items;
+};
+
+extern const struct intel_renderstate_rodata gen6_null_state;
+extern const struct intel_renderstate_rodata gen7_null_state;
+extern const struct intel_renderstate_rodata gen8_null_state;
+
+#define RO_RENDERSTATE(_g) \
+ const struct intel_renderstate_rodata gen ## _g ## _null_state = { \
+ .reloc = gen ## _g ## _null_state_relocs, \
+ .reloc_items = sizeof(gen ## _g ## _null_state_relocs)/4, \
+ .batch = gen ## _g ## _null_state_batch, \
+ .batch_items = sizeof(gen ## _g ## _null_state_batch)/4, \
+ }
+
+#endif /* _INTEL_RENDERSTATE_H */
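RO_RENDERSTATE(_g) pastes the generation number into both the table name and the two array names, so each intel_renderstate_gen*.c file only has to define its relocs and batch arrays before invoking the macro. Expanded by hand, the gen6 instance reads roughly:

/* Hand expansion of RO_RENDERSTATE(6), for illustration only. */
const struct intel_renderstate_rodata gen6_null_state = {
	.reloc       = gen6_null_state_relocs,
	.reloc_items = sizeof(gen6_null_state_relocs) / 4,
	.batch       = gen6_null_state_batch,
	.batch_items = sizeof(gen6_null_state_batch) / 4,
};

The sizeof()/4 terms are element counts, since both arrays hold u32 words.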
diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen6.c b/drivers/gpu/drm/i915/intel_renderstate_gen6.c
new file mode 100644
index 0000000..740538a
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_renderstate_gen6.c
@@ -0,0 +1,289 @@
+#include "intel_renderstate.h"
+
+static const u32 gen6_null_state_relocs[] = {
+ 0x00000020,
+ 0x00000024,
+ 0x0000002c,
+ 0x000001e0,
+ 0x000001e4,
+};
+
+static const u32 gen6_null_state_batch[] = {
+ 0x69040000,
+ 0x790d0001,
+ 0x00000000,
+ 0x00000000,
+ 0x78180000,
+ 0x00000001,
+ 0x61010008,
+ 0x00000000,
+ 0x00000001, /* reloc */
+ 0x00000001, /* reloc */
+ 0x00000000,
+ 0x00000001, /* reloc */
+ 0x00000000,
+ 0x00000001,
+ 0x00000000,
+ 0x00000001,
+ 0x61020000,
+ 0x00000000,
+ 0x78050001,
+ 0x00000018,
+ 0x00000000,
+ 0x780d1002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000420,
+ 0x78150003,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78100004,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78160003,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78110005,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78120002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78170003,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x79050005,
+ 0xe0040000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x79100000,
+ 0x00000000,
+ 0x79000002,
+ 0xffffffff,
+ 0x00000000,
+ 0x00000000,
+ 0x780e0002,
+ 0x00000441,
+ 0x00000401,
+ 0x00000401,
+ 0x78021002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000400,
+ 0x78130012,
+ 0x00400810,
+ 0x00000000,
+ 0x20000000,
+ 0x04000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78140007,
+ 0x00000280,
+ 0x08080000,
+ 0x00000000,
+ 0x00060000,
+ 0x4e080002,
+ 0x00100400,
+ 0x00000000,
+ 0x00000000,
+ 0x78090005,
+ 0x02000000,
+ 0x22220000,
+ 0x02f60000,
+ 0x11330000,
+ 0x02850004,
+ 0x11220000,
+ 0x78011002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000200,
+ 0x78080003,
+ 0x00002000,
+ 0x00000448, /* reloc */
+ 0x00000448, /* reloc */
+ 0x00000000,
+ 0x05000000, /* cmds end */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000220, /* state start */
+ 0x00000240,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x0060005a,
+ 0x204077be,
+ 0x000000c0,
+ 0x008d0040,
+ 0x0060005a,
+ 0x206077be,
+ 0x000000c0,
+ 0x008d0080,
+ 0x0060005a,
+ 0x208077be,
+ 0x000000d0,
+ 0x008d0040,
+ 0x0060005a,
+ 0x20a077be,
+ 0x000000d0,
+ 0x008d0080,
+ 0x00000201,
+ 0x20080061,
+ 0x00000000,
+ 0x00000000,
+ 0x00600001,
+ 0x20200022,
+ 0x008d0000,
+ 0x00000000,
+ 0x02800031,
+ 0x21c01cc9,
+ 0x00000020,
+ 0x0a8a0001,
+ 0x00600001,
+ 0x204003be,
+ 0x008d01c0,
+ 0x00000000,
+ 0x00600001,
+ 0x206003be,
+ 0x008d01e0,
+ 0x00000000,
+ 0x00600001,
+ 0x208003be,
+ 0x008d0200,
+ 0x00000000,
+ 0x00600001,
+ 0x20a003be,
+ 0x008d0220,
+ 0x00000000,
+ 0x00600001,
+ 0x20c003be,
+ 0x008d0240,
+ 0x00000000,
+ 0x00600001,
+ 0x20e003be,
+ 0x008d0260,
+ 0x00000000,
+ 0x00600001,
+ 0x210003be,
+ 0x008d0280,
+ 0x00000000,
+ 0x00600001,
+ 0x212003be,
+ 0x008d02a0,
+ 0x00000000,
+ 0x05800031,
+ 0x24001cc8,
+ 0x00000040,
+ 0x90019000,
+ 0x0000007e,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x0000007e,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x0000007e,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x0000007e,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x0000007e,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x0000007e,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x0000007e,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x0000007e,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x30000000,
+ 0x00000124,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0xf99a130c,
+ 0x799a130c,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x80000031,
+ 0x00000003,
+ 0x00000000, /* state end */
+};
+
+RO_RENDERSTATE(6);
diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen7.c b/drivers/gpu/drm/i915/intel_renderstate_gen7.c
new file mode 100644
index 0000000..6fa7ff2
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_renderstate_gen7.c
@@ -0,0 +1,253 @@
+#include "intel_renderstate.h"
+
+static const u32 gen7_null_state_relocs[] = {
+ 0x0000000c,
+ 0x00000010,
+ 0x00000018,
+ 0x000001ec,
+};
+
+static const u32 gen7_null_state_batch[] = {
+ 0x69040000,
+ 0x61010008,
+ 0x00000000,
+ 0x00000001, /* reloc */
+ 0x00000001, /* reloc */
+ 0x00000000,
+ 0x00000001, /* reloc */
+ 0x00000000,
+ 0x00000001,
+ 0x00000000,
+ 0x00000001,
+ 0x790d0002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78180000,
+ 0x00000001,
+ 0x79160000,
+ 0x00000008,
+ 0x78300000,
+ 0x02010040,
+ 0x78310000,
+ 0x04000000,
+ 0x78320000,
+ 0x04000000,
+ 0x78330000,
+ 0x02000000,
+ 0x78100004,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x781b0005,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x781c0002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x781d0004,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78110005,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78120002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78210000,
+ 0x00000000,
+ 0x78130005,
+ 0x00000000,
+ 0x20000000,
+ 0x04000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78140001,
+ 0x20000800,
+ 0x00000000,
+ 0x781e0001,
+ 0x00000000,
+ 0x00000000,
+ 0x78050005,
+ 0xe0040000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78040001,
+ 0x00000000,
+ 0x00000000,
+ 0x78240000,
+ 0x00000240,
+ 0x78230000,
+ 0x00000260,
+ 0x782f0000,
+ 0x00000280,
+ 0x781f000c,
+ 0x00400810,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78200006,
+ 0x000002c0,
+ 0x08080000,
+ 0x00000000,
+ 0x28000402,
+ 0x00060000,
+ 0x00000000,
+ 0x00000000,
+ 0x78090005,
+ 0x02000000,
+ 0x22220000,
+ 0x02f60000,
+ 0x11230000,
+ 0x02f60004,
+ 0x11230000,
+ 0x78080003,
+ 0x00006008,
+ 0x00000340, /* reloc */
+ 0xffffffff,
+ 0x00000000,
+ 0x782a0000,
+ 0x00000360,
+ 0x79000002,
+ 0xffffffff,
+ 0x00000000,
+ 0x00000000,
+ 0x7b000005,
+ 0x0000000f,
+ 0x00000003,
+ 0x00000000,
+ 0x00000001,
+ 0x00000000,
+ 0x00000000,
+ 0x05000000, /* cmds end */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000031, /* state start */
+ 0x00000003,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0xf99a130c,
+ 0x799a130c,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000492,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x0080005a,
+ 0x2e2077bd,
+ 0x000000c0,
+ 0x008d0040,
+ 0x0080005a,
+ 0x2e6077bd,
+ 0x000000d0,
+ 0x008d0040,
+ 0x02800031,
+ 0x21801fa9,
+ 0x008d0e20,
+ 0x08840001,
+ 0x00800001,
+ 0x2e2003bd,
+ 0x008d0180,
+ 0x00000000,
+ 0x00800001,
+ 0x2e6003bd,
+ 0x008d01c0,
+ 0x00000000,
+ 0x00800001,
+ 0x2ea003bd,
+ 0x008d0200,
+ 0x00000000,
+ 0x00800001,
+ 0x2ee003bd,
+ 0x008d0240,
+ 0x00000000,
+ 0x05800031,
+ 0x20001fa8,
+ 0x008d0e20,
+ 0x90031000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000380,
+ 0x000003a0,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000, /* state end */
+};
+
+RO_RENDERSTATE(7);
diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen8.c b/drivers/gpu/drm/i915/intel_renderstate_gen8.c
new file mode 100644
index 0000000..5c87561
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_renderstate_gen8.c
@@ -0,0 +1,479 @@
+#include "intel_renderstate.h"
+
+static const u32 gen8_null_state_relocs[] = {
+ 0x00000048,
+ 0x00000050,
+ 0x00000060,
+ 0x000003ec,
+};
+
+static const u32 gen8_null_state_batch[] = {
+ 0x69040000,
+ 0x61020001,
+ 0x00000000,
+ 0x00000000,
+ 0x79120000,
+ 0x00000000,
+ 0x79130000,
+ 0x00000000,
+ 0x79140000,
+ 0x00000000,
+ 0x79150000,
+ 0x00000000,
+ 0x79160000,
+ 0x00000000,
+ 0x6101000e,
+ 0x00000001,
+ 0x00000000,
+ 0x00000001,
+ 0x00000001, /* reloc */
+ 0x00000000,
+ 0x00000001, /* reloc */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000001, /* reloc */
+ 0x00000000,
+ 0xfffff001,
+ 0x00001001,
+ 0xfffff001,
+ 0x00001001,
+ 0x78230000,
+ 0x000006e0,
+ 0x78210000,
+ 0x00000700,
+ 0x78300000,
+ 0x08010040,
+ 0x78330000,
+ 0x08000000,
+ 0x78310000,
+ 0x08000000,
+ 0x78320000,
+ 0x08000000,
+ 0x78240000,
+ 0x00000641,
+ 0x780e0000,
+ 0x00000601,
+ 0x780d0000,
+ 0x00000000,
+ 0x78180000,
+ 0x00000001,
+ 0x78520003,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78190009,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x781b0007,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78270000,
+ 0x00000000,
+ 0x782c0000,
+ 0x00000000,
+ 0x781c0002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78160009,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78110008,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78290000,
+ 0x00000000,
+ 0x782e0000,
+ 0x00000000,
+ 0x781a0009,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x781d0007,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78280000,
+ 0x00000000,
+ 0x782d0000,
+ 0x00000000,
+ 0x78260000,
+ 0x00000000,
+ 0x782b0000,
+ 0x00000000,
+ 0x78150009,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78100007,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x781e0003,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78120002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x781f0002,
+ 0x30400820,
+ 0x00000000,
+ 0x00000000,
+ 0x78510009,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78500003,
+ 0x00210000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78130002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x782a0000,
+ 0x00000480,
+ 0x782f0000,
+ 0x00000540,
+ 0x78140000,
+ 0x00000800,
+ 0x78170009,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x7820000a,
+ 0x00000580,
+ 0x00000000,
+ 0x08080000,
+ 0x00000000,
+ 0x00000000,
+ 0x1f000002,
+ 0x00060000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x784d0000,
+ 0x40000000,
+ 0x784f0000,
+ 0x80000100,
+ 0x780f0000,
+ 0x00000740,
+ 0x78050006,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78070003,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78060003,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78040001,
+ 0x00000000,
+ 0x00000001,
+ 0x79000002,
+ 0xffffffff,
+ 0x00000000,
+ 0x00000000,
+ 0x78080003,
+ 0x00006000,
+ 0x000005e0, /* reloc */
+ 0x00000000,
+ 0x00000000,
+ 0x78090005,
+ 0x02000000,
+ 0x22220000,
+ 0x02f60000,
+ 0x11230000,
+ 0x02850004,
+ 0x11230000,
+ 0x784b0000,
+ 0x0000000f,
+ 0x78490001,
+ 0x00000000,
+ 0x00000000,
+ 0x7b000005,
+ 0x00000000,
+ 0x00000003,
+ 0x00000000,
+ 0x00000001,
+ 0x00000000,
+ 0x00000000,
+ 0x05000000, /* cmds end */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x000004c0, /* state start */
+ 0x00000500,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000092,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x0060005a,
+ 0x21403ae8,
+ 0x3a0000c0,
+ 0x008d0040,
+ 0x0060005a,
+ 0x21603ae8,
+ 0x3a0000c0,
+ 0x008d0080,
+ 0x0060005a,
+ 0x21803ae8,
+ 0x3a0000d0,
+ 0x008d0040,
+ 0x0060005a,
+ 0x21a03ae8,
+ 0x3a0000d0,
+ 0x008d0080,
+ 0x02800031,
+ 0x2e0022e8,
+ 0x0e000140,
+ 0x08840001,
+ 0x05800031,
+ 0x200022e0,
+ 0x0e000e00,
+ 0x90031000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x06200000,
+ 0x00000002,
+ 0x06200000,
+ 0x00000002,
+ 0x06200000,
+ 0x00000002,
+ 0x06200000,
+ 0x00000002,
+ 0x06200000,
+ 0x00000002,
+ 0x06200000,
+ 0x00000002,
+ 0x06200000,
+ 0x00000002,
+ 0x06200000,
+ 0x00000002,
+ 0x06200000,
+ 0x00000002,
+ 0x06200000,
+ 0x00000002,
+ 0x06200000,
+ 0x00000002,
+ 0x06200000,
+ 0x00000002,
+ 0x06200000,
+ 0x00000002,
+ 0x06200000,
+ 0x00000002,
+ 0x06200000,
+ 0x00000002,
+ 0x06200000,
+ 0x00000002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0xf99a130c,
+ 0x799a130c,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x3f800000,
+ 0x00000000,
+ 0x3f800000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000, /* state end */
+};
+
+RO_RENDERSTATE(8);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 40a7aa4..3379722 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -40,30 +40,37 @@
*/
#define CACHELINE_BYTES 64
-static inline int ring_space(struct intel_ring_buffer *ring)
+static inline int __ring_space(int head, int tail, int size)
{
- int space = (ring->head & HEAD_ADDR) - (ring->tail + I915_RING_FREE_SPACE);
+ int space = head - (tail + I915_RING_FREE_SPACE);
if (space < 0)
- space += ring->size;
+ space += size;
return space;
}
-static bool intel_ring_stopped(struct intel_ring_buffer *ring)
+static inline int ring_space(struct intel_engine_cs *ring)
+{
+ struct intel_ringbuffer *ringbuf = ring->buffer;
+ return __ring_space(ringbuf->head & HEAD_ADDR, ringbuf->tail, ringbuf->size);
+}
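__ring_space() is the classic circular-buffer free-space computation: head minus (tail plus the reserved slack), wrapped by the ring size when the result goes negative. A worked check, assuming the I915_RING_FREE_SPACE slack is 64 bytes and a 4 KiB ring:

/* Worked check of the __ring_space() arithmetic (sketch). */
#include <assert.h>

static int ring_space_demo(int head, int tail, int size)
{
	int space = head - (tail + 64);	/* 64 = assumed reserved slack */
	if (space < 0)
		space += size;		/* tail has wrapped past head */
	return space;
}

int main(void)
{
	/* head ahead of tail: no wrap needed. */
	assert(ring_space_demo(0x800, 0x100, 0x1000) == 0x6c0);
	/* tail ahead of head: the negative result wraps by size. */
	assert(ring_space_demo(0x100, 0x180, 0x1000) == 0xf40);
	return 0;
}

Splitting the arithmetic out of the struct accessors is what lets intel_ring_wait_request() below evaluate how much space a pending request would free without touching the live head/tail.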
+
+static bool intel_ring_stopped(struct intel_engine_cs *ring)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
return dev_priv->gpu_error.stop_rings & intel_ring_flag(ring);
}
-void __intel_ring_advance(struct intel_ring_buffer *ring)
+void __intel_ring_advance(struct intel_engine_cs *ring)
{
- ring->tail &= ring->size - 1;
+ struct intel_ringbuffer *ringbuf = ring->buffer;
+ ringbuf->tail &= ringbuf->size - 1;
if (intel_ring_stopped(ring))
return;
- ring->write_tail(ring, ring->tail);
+ ring->write_tail(ring, ringbuf->tail);
}
static int
-gen2_render_ring_flush(struct intel_ring_buffer *ring,
+gen2_render_ring_flush(struct intel_engine_cs *ring,
u32 invalidate_domains,
u32 flush_domains)
{
@@ -89,7 +96,7 @@
}
static int
-gen4_render_ring_flush(struct intel_ring_buffer *ring,
+gen4_render_ring_flush(struct intel_engine_cs *ring,
u32 invalidate_domains,
u32 flush_domains)
{
@@ -184,7 +191,7 @@
* really our business. That leaves only stall at scoreboard.
*/
static int
-intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring)
+intel_emit_post_sync_nonzero_flush(struct intel_engine_cs *ring)
{
u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
int ret;
@@ -219,7 +226,7 @@
}
static int
-gen6_render_ring_flush(struct intel_ring_buffer *ring,
+gen6_render_ring_flush(struct intel_engine_cs *ring,
u32 invalidate_domains, u32 flush_domains)
{
u32 flags = 0;
@@ -271,7 +278,7 @@
}
static int
-gen7_render_ring_cs_stall_wa(struct intel_ring_buffer *ring)
+gen7_render_ring_cs_stall_wa(struct intel_engine_cs *ring)
{
int ret;
@@ -289,7 +296,7 @@
return 0;
}
-static int gen7_ring_fbc_flush(struct intel_ring_buffer *ring, u32 value)
+static int gen7_ring_fbc_flush(struct intel_engine_cs *ring, u32 value)
{
int ret;
@@ -313,7 +320,7 @@
}
static int
-gen7_render_ring_flush(struct intel_ring_buffer *ring,
+gen7_render_ring_flush(struct intel_engine_cs *ring,
u32 invalidate_domains, u32 flush_domains)
{
u32 flags = 0;
@@ -374,7 +381,7 @@
}
static int
-gen8_render_ring_flush(struct intel_ring_buffer *ring,
+gen8_render_ring_flush(struct intel_engine_cs *ring,
u32 invalidate_domains, u32 flush_domains)
{
u32 flags = 0;
@@ -414,14 +421,14 @@
}
-static void ring_write_tail(struct intel_ring_buffer *ring,
+static void ring_write_tail(struct intel_engine_cs *ring,
u32 value)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
I915_WRITE_TAIL(ring, value);
}
-u64 intel_ring_get_active_head(struct intel_ring_buffer *ring)
+u64 intel_ring_get_active_head(struct intel_engine_cs *ring)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
u64 acthd;
@@ -437,7 +444,7 @@
return acthd;
}
-static void ring_setup_phys_status_page(struct intel_ring_buffer *ring)
+static void ring_setup_phys_status_page(struct intel_engine_cs *ring)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
u32 addr;
@@ -448,7 +455,7 @@
I915_WRITE(HWS_PGA, addr);
}
-static bool stop_ring(struct intel_ring_buffer *ring)
+static bool stop_ring(struct intel_engine_cs *ring)
{
struct drm_i915_private *dev_priv = to_i915(ring->dev);
@@ -472,11 +479,12 @@
return (I915_READ_HEAD(ring) & HEAD_ADDR) == 0;
}
-static int init_ring_common(struct intel_ring_buffer *ring)
+static int init_ring_common(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj = ring->obj;
+ struct intel_ringbuffer *ringbuf = ring->buffer;
+ struct drm_i915_gem_object *obj = ringbuf->obj;
int ret = 0;
gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
@@ -515,7 +523,7 @@
* register values. */
I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj));
I915_WRITE_CTL(ring,
- ((ring->size - PAGE_SIZE) & RING_NR_PAGES)
+ ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES)
| RING_VALID);
/* If the head is still not zero, the ring is dead */
@@ -535,10 +543,10 @@
if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
i915_kernel_lost_context(ring->dev);
else {
- ring->head = I915_READ_HEAD(ring);
- ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
- ring->space = ring_space(ring);
- ring->last_retired_head = -1;
+ ringbuf->head = I915_READ_HEAD(ring);
+ ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
+ ringbuf->space = ring_space(ring);
+ ringbuf->last_retired_head = -1;
}
memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
@@ -550,7 +558,7 @@
}
static int
-init_pipe_control(struct intel_ring_buffer *ring)
+init_pipe_control(struct intel_engine_cs *ring)
{
int ret;
@@ -591,7 +599,7 @@
return ret;
}
-static int init_render_ring(struct intel_ring_buffer *ring)
+static int init_render_ring(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -605,7 +613,7 @@
* to use MI_WAIT_FOR_EVENT within the CS. It should already be
* programmed to '1' on all products.
*
- * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw
+ * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv
*/
if (INTEL_INFO(dev)->gen >= 6)
I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
@@ -647,7 +655,7 @@
return ret;
}
-static void render_ring_cleanup(struct intel_ring_buffer *ring)
+static void render_ring_cleanup(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
@@ -663,12 +671,12 @@
ring->scratch.obj = NULL;
}
-static int gen6_signal(struct intel_ring_buffer *signaller,
+static int gen6_signal(struct intel_engine_cs *signaller,
unsigned int num_dwords)
{
struct drm_device *dev = signaller->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *useless;
+ struct intel_engine_cs *useless;
int i, ret;
/* NB: In order to be able to do semaphore MBOX updates for varying
@@ -679,6 +687,8 @@
#define MBOX_UPDATE_DWORDS 4
if (i915_semaphore_is_enabled(dev))
num_dwords += ((I915_NUM_RINGS-1) * MBOX_UPDATE_DWORDS);
+ else
+ return intel_ring_begin(signaller, num_dwords);
ret = intel_ring_begin(signaller, num_dwords);
if (ret)
@@ -713,7 +723,7 @@
* This acts like a signal in the canonical semaphore.
*/
static int
-gen6_add_request(struct intel_ring_buffer *ring)
+gen6_add_request(struct intel_engine_cs *ring)
{
int ret;
@@ -745,8 +755,8 @@
* @seqno - seqno which the waiter will block on
*/
static int
-gen6_ring_sync(struct intel_ring_buffer *waiter,
- struct intel_ring_buffer *signaller,
+gen6_ring_sync(struct intel_engine_cs *waiter,
+ struct intel_engine_cs *signaller,
u32 seqno)
{
u32 dw1 = MI_SEMAPHORE_MBOX |
@@ -794,7 +804,7 @@
} while (0)
static int
-pc_render_add_request(struct intel_ring_buffer *ring)
+pc_render_add_request(struct intel_engine_cs *ring)
{
u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
int ret;
@@ -842,7 +852,7 @@
}
static u32
-gen6_ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
+gen6_ring_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
{
/* Workaround to force correct ordering between irq and seqno writes on
* ivb (and maybe also on snb) by reading from a CS register (like
@@ -856,31 +866,31 @@
}
static u32
-ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
+ring_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
{
return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}
static void
-ring_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
+ring_set_seqno(struct intel_engine_cs *ring, u32 seqno)
{
intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
}
static u32
-pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
+pc_render_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
{
return ring->scratch.cpu_page[0];
}
static void
-pc_render_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
+pc_render_set_seqno(struct intel_engine_cs *ring, u32 seqno)
{
ring->scratch.cpu_page[0] = seqno;
}
static bool
-gen5_ring_get_irq(struct intel_ring_buffer *ring)
+gen5_ring_get_irq(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -898,7 +908,7 @@
}
static void
-gen5_ring_put_irq(struct intel_ring_buffer *ring)
+gen5_ring_put_irq(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -911,7 +921,7 @@
}
static bool
-i9xx_ring_get_irq(struct intel_ring_buffer *ring)
+i9xx_ring_get_irq(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -932,7 +942,7 @@
}
static void
-i9xx_ring_put_irq(struct intel_ring_buffer *ring)
+i9xx_ring_put_irq(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -948,7 +958,7 @@
}
static bool
-i8xx_ring_get_irq(struct intel_ring_buffer *ring)
+i8xx_ring_get_irq(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -969,7 +979,7 @@
}
static void
-i8xx_ring_put_irq(struct intel_ring_buffer *ring)
+i8xx_ring_put_irq(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -984,7 +994,7 @@
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}
-void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
+void intel_ring_setup_status_page(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = ring->dev->dev_private;
@@ -1047,7 +1057,7 @@
}
static int
-bsd_ring_flush(struct intel_ring_buffer *ring,
+bsd_ring_flush(struct intel_engine_cs *ring,
u32 invalidate_domains,
u32 flush_domains)
{
@@ -1064,7 +1074,7 @@
}
static int
-i9xx_add_request(struct intel_ring_buffer *ring)
+i9xx_add_request(struct intel_engine_cs *ring)
{
int ret;
@@ -1082,7 +1092,7 @@
}
static bool
-gen6_ring_get_irq(struct intel_ring_buffer *ring)
+gen6_ring_get_irq(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1107,7 +1117,7 @@
}
static void
-gen6_ring_put_irq(struct intel_ring_buffer *ring)
+gen6_ring_put_irq(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1125,7 +1135,7 @@
}
static bool
-hsw_vebox_get_irq(struct intel_ring_buffer *ring)
+hsw_vebox_get_irq(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1145,7 +1155,7 @@
}
static void
-hsw_vebox_put_irq(struct intel_ring_buffer *ring)
+hsw_vebox_put_irq(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1163,7 +1173,7 @@
}
static bool
-gen8_ring_get_irq(struct intel_ring_buffer *ring)
+gen8_ring_get_irq(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1189,7 +1199,7 @@
}
static void
-gen8_ring_put_irq(struct intel_ring_buffer *ring)
+gen8_ring_put_irq(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1209,7 +1219,7 @@
}
static int
-i965_dispatch_execbuffer(struct intel_ring_buffer *ring,
+i965_dispatch_execbuffer(struct intel_engine_cs *ring,
u64 offset, u32 length,
unsigned flags)
{
@@ -1232,7 +1242,7 @@
/* Just a userspace ABI convention to limit the wa batch bo to a reasonable size */
#define I830_BATCH_LIMIT (256*1024)
static int
-i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
+i830_dispatch_execbuffer(struct intel_engine_cs *ring,
u64 offset, u32 len,
unsigned flags)
{
@@ -1283,7 +1293,7 @@
}
static int
-i915_dispatch_execbuffer(struct intel_ring_buffer *ring,
+i915_dispatch_execbuffer(struct intel_engine_cs *ring,
u64 offset, u32 len,
unsigned flags)
{
@@ -1300,7 +1310,7 @@
return 0;
}
-static void cleanup_status_page(struct intel_ring_buffer *ring)
+static void cleanup_status_page(struct intel_engine_cs *ring)
{
struct drm_i915_gem_object *obj;
@@ -1314,7 +1324,7 @@
ring->status_page.obj = NULL;
}
-static int init_status_page(struct intel_ring_buffer *ring)
+static int init_status_page(struct intel_engine_cs *ring)
{
struct drm_i915_gem_object *obj;
@@ -1351,7 +1361,7 @@
return 0;
}
-static int init_phys_status_page(struct intel_ring_buffer *ring)
+static int init_phys_status_page(struct intel_engine_cs *ring)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
@@ -1368,21 +1378,22 @@
return 0;
}
-static int allocate_ring_buffer(struct intel_ring_buffer *ring)
+static int allocate_ring_buffer(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_ringbuffer *ringbuf = ring->buffer;
struct drm_i915_gem_object *obj;
int ret;
- if (ring->obj)
+ if (intel_ring_initialized(ring))
return 0;
obj = NULL;
if (!HAS_LLC(dev))
- obj = i915_gem_object_create_stolen(dev, ring->size);
+ obj = i915_gem_object_create_stolen(dev, ringbuf->size);
if (obj == NULL)
- obj = i915_gem_alloc_object(dev, ring->size);
+ obj = i915_gem_alloc_object(dev, ringbuf->size);
if (obj == NULL)
return -ENOMEM;
@@ -1394,15 +1405,15 @@
if (ret)
goto err_unpin;
- ring->virtual_start =
+ ringbuf->virtual_start =
ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
- ring->size);
- if (ring->virtual_start == NULL) {
+ ringbuf->size);
+ if (ringbuf->virtual_start == NULL) {
ret = -EINVAL;
goto err_unpin;
}
- ring->obj = obj;
+ ringbuf->obj = obj;
return 0;
err_unpin:
@@ -1413,14 +1424,22 @@
}
static int intel_init_ring_buffer(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+ struct intel_engine_cs *ring)
{
+ struct intel_ringbuffer *ringbuf = ring->buffer;
int ret;
+ if (ringbuf == NULL) {
+ ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
+ if (!ringbuf)
+ return -ENOMEM;
+ ring->buffer = ringbuf;
+ }
+
ring->dev = dev;
INIT_LIST_HEAD(&ring->active_list);
INIT_LIST_HEAD(&ring->request_list);
- ring->size = 32 * PAGE_SIZE;
+ ringbuf->size = 32 * PAGE_SIZE;
memset(ring->semaphore.sync_seqno, 0, sizeof(ring->semaphore.sync_seqno));
init_waitqueue_head(&ring->irq_queue);
@@ -1428,48 +1447,60 @@
if (I915_NEED_GFX_HWS(dev)) {
ret = init_status_page(ring);
if (ret)
- return ret;
+ goto error;
} else {
BUG_ON(ring->id != RCS);
ret = init_phys_status_page(ring);
if (ret)
- return ret;
+ goto error;
}
ret = allocate_ring_buffer(ring);
if (ret) {
DRM_ERROR("Failed to allocate ringbuffer %s: %d\n", ring->name, ret);
- return ret;
+ goto error;
}
/* Work around an erratum on the i830 which causes a hang if
* the TAIL pointer points to within the last 2 cachelines
* of the buffer.
*/
- ring->effective_size = ring->size;
+ ringbuf->effective_size = ringbuf->size;
if (IS_I830(dev) || IS_845G(dev))
- ring->effective_size -= 2 * CACHELINE_BYTES;
+ ringbuf->effective_size -= 2 * CACHELINE_BYTES;
- i915_cmd_parser_init_ring(ring);
+ ret = i915_cmd_parser_init_ring(ring);
+ if (ret)
+ goto error;
- return ring->init(ring);
+ ret = ring->init(ring);
+ if (ret)
+ goto error;
+
+ return 0;
+
+error:
+ kfree(ringbuf);
+ ring->buffer = NULL;
+ return ret;
}
-void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
+void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
{
struct drm_i915_private *dev_priv = to_i915(ring->dev);
+ struct intel_ringbuffer *ringbuf = ring->buffer;
- if (ring->obj == NULL)
+ if (!intel_ring_initialized(ring))
return;
intel_stop_ring_buffer(ring);
WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
- iounmap(ring->virtual_start);
+ iounmap(ringbuf->virtual_start);
- i915_gem_object_ggtt_unpin(ring->obj);
- drm_gem_object_unreference(&ring->obj->base);
- ring->obj = NULL;
+ i915_gem_object_ggtt_unpin(ringbuf->obj);
+ drm_gem_object_unreference(&ringbuf->obj->base);
+ ringbuf->obj = NULL;
ring->preallocated_lazy_request = NULL;
ring->outstanding_lazy_seqno = 0;
@@ -1477,44 +1508,34 @@
ring->cleanup(ring);
cleanup_status_page(ring);
+
+ i915_cmd_parser_fini_ring(ring);
+
+ kfree(ringbuf);
+ ring->buffer = NULL;
}
-static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
+static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
{
+ struct intel_ringbuffer *ringbuf = ring->buffer;
struct drm_i915_gem_request *request;
- u32 seqno = 0, tail;
+ u32 seqno = 0;
int ret;
- if (ring->last_retired_head != -1) {
- ring->head = ring->last_retired_head;
- ring->last_retired_head = -1;
+ if (ringbuf->last_retired_head != -1) {
+ ringbuf->head = ringbuf->last_retired_head;
+ ringbuf->last_retired_head = -1;
- ring->space = ring_space(ring);
- if (ring->space >= n)
+ ringbuf->space = ring_space(ring);
+ if (ringbuf->space >= n)
return 0;
}
list_for_each_entry(request, &ring->request_list, list) {
- int space;
-
- if (request->tail == -1)
- continue;
-
- space = request->tail - (ring->tail + I915_RING_FREE_SPACE);
- if (space < 0)
- space += ring->size;
- if (space >= n) {
+ if (__ring_space(request->tail, ringbuf->tail, ringbuf->size) >= n) {
seqno = request->seqno;
- tail = request->tail;
break;
}
-
- /* Consume this request in case we need more space than
- * is available and so need to prevent a race between
- * updating last_retired_head and direct reads of
- * I915_RING_HEAD. It also provides a nice sanity check.
- */
- request->tail = -1;
}
if (seqno == 0)
@@ -1524,18 +1545,19 @@
if (ret)
return ret;
- ring->head = tail;
- ring->space = ring_space(ring);
- if (WARN_ON(ring->space < n))
- return -ENOSPC;
+ i915_gem_retire_requests_ring(ring);
+ ringbuf->head = ringbuf->last_retired_head;
+ ringbuf->last_retired_head = -1;
+ ringbuf->space = ring_space(ring);
return 0;
}
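/* __ring_space() used above packages the math that the removed lines
 * open-coded: the free bytes between a consumer position `head` and the
 * producer `tail`, modulo the ring size, always keeping
 * I915_RING_FREE_SPACE bytes of slack. Equivalent sketch:
 */
static int __ring_space(int head, int tail, int size)
{
	int space = head - (tail + I915_RING_FREE_SPACE);
	if (space < 0)
		space += size;
	return space;
}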
-static int ring_wait_for_space(struct intel_ring_buffer *ring, int n)
+static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_ringbuffer *ringbuf = ring->buffer;
unsigned long end;
int ret;
@@ -1546,7 +1568,6 @@
/* force the tail write in case we have been skipping them */
__intel_ring_advance(ring);
- trace_i915_ring_wait_begin(ring);
/* With GEM the hangcheck timer should kick us out of the loop,
* leaving it early runs the risk of corrupting GEM state (due
* to running on almost untested codepaths). But on resume
@@ -1554,12 +1575,13 @@
* case by choosing an insanely large timeout. */
end = jiffies + 60 * HZ;
+ trace_i915_ring_wait_begin(ring);
do {
- ring->head = I915_READ_HEAD(ring);
- ring->space = ring_space(ring);
- if (ring->space >= n) {
- trace_i915_ring_wait_end(ring);
- return 0;
+ ringbuf->head = I915_READ_HEAD(ring);
+ ringbuf->space = ring_space(ring);
+ if (ringbuf->space >= n) {
+ ret = 0;
+ break;
}
if (!drm_core_check_feature(dev, DRIVER_MODESET) &&
@@ -1571,38 +1593,49 @@
msleep(1);
+ if (dev_priv->mm.interruptible && signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
+
ret = i915_gem_check_wedge(&dev_priv->gpu_error,
dev_priv->mm.interruptible);
if (ret)
- return ret;
- } while (!time_after(jiffies, end));
+ break;
+
+ if (time_after(jiffies, end)) {
+ ret = -EBUSY;
+ break;
+ }
+ } while (1);
trace_i915_ring_wait_end(ring);
- return -EBUSY;
+ return ret;
}
-static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
+static int intel_wrap_ring_buffer(struct intel_engine_cs *ring)
{
uint32_t __iomem *virt;
- int rem = ring->size - ring->tail;
+ struct intel_ringbuffer *ringbuf = ring->buffer;
+ int rem = ringbuf->size - ringbuf->tail;
- if (ring->space < rem) {
+ if (ringbuf->space < rem) {
int ret = ring_wait_for_space(ring, rem);
if (ret)
return ret;
}
- virt = ring->virtual_start + ring->tail;
+ virt = ringbuf->virtual_start + ringbuf->tail;
rem /= 4;
while (rem--)
iowrite32(MI_NOOP, virt++);
- ring->tail = 0;
- ring->space = ring_space(ring);
+ ringbuf->tail = 0;
+ ringbuf->space = ring_space(ring);
return 0;
}
-int intel_ring_idle(struct intel_ring_buffer *ring)
+int intel_ring_idle(struct intel_engine_cs *ring)
{
u32 seqno;
int ret;
@@ -1626,7 +1659,7 @@
}
static int
-intel_ring_alloc_seqno(struct intel_ring_buffer *ring)
+intel_ring_alloc_seqno(struct intel_engine_cs *ring)
{
if (ring->outstanding_lazy_seqno)
return 0;
@@ -1644,18 +1677,19 @@
return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
}
-static int __intel_ring_prepare(struct intel_ring_buffer *ring,
+static int __intel_ring_prepare(struct intel_engine_cs *ring,
int bytes)
{
+ struct intel_ringbuffer *ringbuf = ring->buffer;
int ret;
- if (unlikely(ring->tail + bytes > ring->effective_size)) {
+ if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) {
ret = intel_wrap_ring_buffer(ring);
if (unlikely(ret))
return ret;
}
- if (unlikely(ring->space < bytes)) {
+ if (unlikely(ringbuf->space < bytes)) {
ret = ring_wait_for_space(ring, bytes);
if (unlikely(ret))
return ret;
@@ -1664,7 +1698,7 @@
return 0;
}
-int intel_ring_begin(struct intel_ring_buffer *ring,
+int intel_ring_begin(struct intel_engine_cs *ring,
int num_dwords)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
@@ -1684,14 +1718,14 @@
if (ret)
return ret;
- ring->space -= num_dwords * sizeof(uint32_t);
+ ring->buffer->space -= num_dwords * sizeof(uint32_t);
return 0;
}
/* Align the ring tail to a cacheline boundary */
-int intel_ring_cacheline_align(struct intel_ring_buffer *ring)
+int intel_ring_cacheline_align(struct intel_engine_cs *ring)
{
- int num_dwords = (ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
+ int num_dwords = (ring->buffer->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
int ret;
if (num_dwords == 0)
@@ -1710,7 +1744,7 @@
return 0;
}
-void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
+void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
@@ -1727,7 +1761,7 @@
ring->hangcheck.seqno = seqno;
}
-static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
+static void gen6_bsd_ring_write_tail(struct intel_engine_cs *ring,
u32 value)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
@@ -1760,7 +1794,7 @@
_MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
}
-static int gen6_bsd_ring_flush(struct intel_ring_buffer *ring,
+static int gen6_bsd_ring_flush(struct intel_engine_cs *ring,
u32 invalidate, u32 flush)
{
uint32_t cmd;
@@ -1796,7 +1830,7 @@
}
static int
-gen8_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
+gen8_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
u64 offset, u32 len,
unsigned flags)
{
@@ -1820,7 +1854,7 @@
}
static int
-hsw_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
+hsw_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
u64 offset, u32 len,
unsigned flags)
{
@@ -1841,7 +1875,7 @@
}
static int
-gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
+gen6_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
u64 offset, u32 len,
unsigned flags)
{
@@ -1863,7 +1897,7 @@
/* Blitter support (SandyBridge+) */
-static int gen6_ring_flush(struct intel_ring_buffer *ring,
+static int gen6_ring_flush(struct intel_engine_cs *ring,
u32 invalidate, u32 flush)
{
struct drm_device *dev = ring->dev;
@@ -1906,7 +1940,7 @@
int intel_init_render_ring_buffer(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+ struct intel_engine_cs *ring = &dev_priv->ring[RCS];
ring->name = "render ring";
ring->id = RCS;
@@ -2016,16 +2050,25 @@
int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+ struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+ struct intel_ringbuffer *ringbuf = ring->buffer;
int ret;
+ if (ringbuf == NULL) {
+ ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
+ if (!ringbuf)
+ return -ENOMEM;
+ ring->buffer = ringbuf;
+ }
+
ring->name = "render ring";
ring->id = RCS;
ring->mmio_base = RENDER_RING_BASE;
if (INTEL_INFO(dev)->gen >= 6) {
/* non-kms not supported on gen6+ */
- return -ENODEV;
+ ret = -ENODEV;
+ goto err_ringbuf;
}
/* Note: gem is not supported on gen5/ilk without kms (the corresponding
@@ -2060,31 +2103,39 @@
INIT_LIST_HEAD(&ring->active_list);
INIT_LIST_HEAD(&ring->request_list);
- ring->size = size;
- ring->effective_size = ring->size;
+ ringbuf->size = size;
+ ringbuf->effective_size = ringbuf->size;
if (IS_I830(ring->dev) || IS_845G(ring->dev))
- ring->effective_size -= 2 * CACHELINE_BYTES;
+ ringbuf->effective_size -= 2 * CACHELINE_BYTES;
- ring->virtual_start = ioremap_wc(start, size);
- if (ring->virtual_start == NULL) {
+ ringbuf->virtual_start = ioremap_wc(start, size);
+ if (ringbuf->virtual_start == NULL) {
DRM_ERROR("can not ioremap virtual address for"
" ring buffer\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_ringbuf;
}
if (!I915_NEED_GFX_HWS(dev)) {
ret = init_phys_status_page(ring);
if (ret)
- return ret;
+ goto err_vstart;
}
return 0;
+
+err_vstart:
+ iounmap(ringbuf->virtual_start);
+err_ringbuf:
+ kfree(ringbuf);
+ ring->buffer = NULL;
+ return ret;
}
int intel_init_bsd_ring_buffer(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring = &dev_priv->ring[VCS];
+ struct intel_engine_cs *ring = &dev_priv->ring[VCS];
ring->name = "bsd ring";
ring->id = VCS;
@@ -2160,7 +2211,7 @@
int intel_init_bsd2_ring_buffer(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring = &dev_priv->ring[VCS2];
+ struct intel_engine_cs *ring = &dev_priv->ring[VCS2];
if ((INTEL_INFO(dev)->gen != 8)) {
DRM_ERROR("No dual-BSD ring on non-BDW machine\n");
@@ -2183,6 +2234,7 @@
ring->dispatch_execbuffer =
gen8_ring_dispatch_execbuffer;
ring->semaphore.sync_to = gen6_ring_sync;
+ ring->semaphore.signal = gen6_signal;
/*
* The current semaphore is only applied on the pre-gen8. And there
* is no bsd2 ring on the pre-gen8. So now the semaphore_register
@@ -2208,7 +2260,7 @@
int intel_init_blt_ring_buffer(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
+ struct intel_engine_cs *ring = &dev_priv->ring[BCS];
ring->name = "blitter ring";
ring->id = BCS;
@@ -2257,7 +2309,7 @@
int intel_init_vebox_ring_buffer(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring = &dev_priv->ring[VECS];
+ struct intel_engine_cs *ring = &dev_priv->ring[VECS];
ring->name = "video enhancement ring";
ring->id = VECS;
@@ -2299,7 +2351,7 @@
}
int
-intel_ring_flush_all_caches(struct intel_ring_buffer *ring)
+intel_ring_flush_all_caches(struct intel_engine_cs *ring)
{
int ret;
@@ -2317,7 +2369,7 @@
}
int
-intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring)
+intel_ring_invalidate_all_caches(struct intel_engine_cs *ring)
{
uint32_t flush_domains;
int ret;
@@ -2337,7 +2389,7 @@
}
void
-intel_stop_ring_buffer(struct intel_ring_buffer *ring)
+intel_stop_ring_buffer(struct intel_engine_cs *ring)
{
int ret;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 72c3c15..910c83c 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -1,6 +1,10 @@
#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_
+#include <linux/hashtable.h>
+
+#define I915_CMD_HASH_ORDER 9
+
/*
* Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
* Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
@@ -54,7 +58,28 @@
bool deadlock;
};
-struct intel_ring_buffer {
+struct intel_ringbuffer {
+ struct drm_i915_gem_object *obj;
+ void __iomem *virtual_start;
+
+ u32 head;
+ u32 tail;
+ int space;
+ int size;
+ int effective_size;
+
+ /** We track the position of the requests in the ring buffer, and
+ * when each is retired we increment last_retired_head as the GPU
+ * must have finished processing the request and so we know we
+ * can advance the ringbuffer up to that position.
+ *
+ * last_retired_head is set to -1 after the value is consumed so
+ * we can detect new retirements.
+ */
+ u32 last_retired_head;
+};
+
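+/*
+ * Illustrative relationship after the split (a sketch, not a new API):
+ * one engine owns at most one ringbuffer, reached through ring->buffer,
+ * and all head/tail/space bookkeeping now lives on the buffer:
+ *
+ *	struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+ *	struct intel_ringbuffer *ringbuf = ring->buffer;
+ *	int free = ringbuf->space;
+ */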
+struct intel_engine_cs {
const char *name;
enum intel_ring_id {
RCS = 0x0,
@@ -66,57 +91,41 @@
#define I915_NUM_RINGS 5
#define LAST_USER_RING (VECS + 1)
u32 mmio_base;
- void __iomem *virtual_start;
struct drm_device *dev;
- struct drm_i915_gem_object *obj;
+ struct intel_ringbuffer *buffer;
- u32 head;
- u32 tail;
- int space;
- int size;
- int effective_size;
struct intel_hw_status_page status_page;
- /** We track the position of the requests in the ring buffer, and
- * when each is retired we increment last_retired_head as the GPU
- * must have finished processing the request and so we know we
- * can advance the ringbuffer up to that position.
- *
- * last_retired_head is set to -1 after the value is consumed so
- * we can detect new retirements.
- */
- u32 last_retired_head;
-
unsigned irq_refcount; /* protected by dev_priv->irq_lock */
u32 irq_enable_mask; /* bitmask to enable ring interrupt */
u32 trace_irq_seqno;
- bool __must_check (*irq_get)(struct intel_ring_buffer *ring);
- void (*irq_put)(struct intel_ring_buffer *ring);
+ bool __must_check (*irq_get)(struct intel_engine_cs *ring);
+ void (*irq_put)(struct intel_engine_cs *ring);
- int (*init)(struct intel_ring_buffer *ring);
+ int (*init)(struct intel_engine_cs *ring);
- void (*write_tail)(struct intel_ring_buffer *ring,
+ void (*write_tail)(struct intel_engine_cs *ring,
u32 value);
- int __must_check (*flush)(struct intel_ring_buffer *ring,
+ int __must_check (*flush)(struct intel_engine_cs *ring,
u32 invalidate_domains,
u32 flush_domains);
- int (*add_request)(struct intel_ring_buffer *ring);
+ int (*add_request)(struct intel_engine_cs *ring);
/* Some chipsets are not quite as coherent as advertised and need
* an expensive kick to force a true read of the up-to-date seqno.
* However, the up-to-date seqno is not always required and the last
* seen value is good enough. Note that the seqno will always be
* monotonic, even if not coherent.
*/
- u32 (*get_seqno)(struct intel_ring_buffer *ring,
+ u32 (*get_seqno)(struct intel_engine_cs *ring,
bool lazy_coherency);
- void (*set_seqno)(struct intel_ring_buffer *ring,
+ void (*set_seqno)(struct intel_engine_cs *ring,
u32 seqno);
- int (*dispatch_execbuffer)(struct intel_ring_buffer *ring,
+ int (*dispatch_execbuffer)(struct intel_engine_cs *ring,
u64 offset, u32 length,
unsigned flags);
#define I915_DISPATCH_SECURE 0x1
#define I915_DISPATCH_PINNED 0x2
- void (*cleanup)(struct intel_ring_buffer *ring);
+ void (*cleanup)(struct intel_engine_cs *ring);
struct {
u32 sync_seqno[I915_NUM_RINGS-1];
@@ -129,10 +138,10 @@
} mbox;
/* AKA wait() */
- int (*sync_to)(struct intel_ring_buffer *ring,
- struct intel_ring_buffer *to,
+ int (*sync_to)(struct intel_engine_cs *ring,
+ struct intel_engine_cs *to,
u32 seqno);
- int (*signal)(struct intel_ring_buffer *signaller,
+ int (*signal)(struct intel_engine_cs *signaller,
/* num_dwords needed by caller */
unsigned int num_dwords);
} semaphore;
@@ -165,8 +174,8 @@
wait_queue_head_t irq_queue;
- struct i915_hw_context *default_context;
- struct i915_hw_context *last_context;
+ struct intel_context *default_context;
+ struct intel_context *last_context;
struct intel_ring_hangcheck hangcheck;
@@ -176,12 +185,13 @@
volatile u32 *cpu_page;
} scratch;
+ bool needs_cmd_parser;
+
/*
- * Tables of commands the command parser needs to know about
+ * Table of commands the command parser needs to know about
* for this ring.
*/
- const struct drm_i915_cmd_table *cmd_tables;
- int cmd_table_count;
+ DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);
/*
* Table of registers allowed in commands that read/write registers.
@@ -210,20 +220,20 @@
};
static inline bool
-intel_ring_initialized(struct intel_ring_buffer *ring)
+intel_ring_initialized(struct intel_engine_cs *ring)
{
- return ring->obj != NULL;
+ return ring->buffer && ring->buffer->obj;
}
static inline unsigned
-intel_ring_flag(struct intel_ring_buffer *ring)
+intel_ring_flag(struct intel_engine_cs *ring)
{
return 1 << ring->id;
}
static inline u32
-intel_ring_sync_index(struct intel_ring_buffer *ring,
- struct intel_ring_buffer *other)
+intel_ring_sync_index(struct intel_engine_cs *ring,
+ struct intel_engine_cs *other)
{
int idx;
@@ -241,7 +251,7 @@
}
static inline u32
-intel_read_status_page(struct intel_ring_buffer *ring,
+intel_read_status_page(struct intel_engine_cs *ring,
int reg)
{
/* Ensure that the compiler doesn't optimize away the load. */
@@ -250,7 +260,7 @@
}
static inline void
-intel_write_status_page(struct intel_ring_buffer *ring,
+intel_write_status_page(struct intel_engine_cs *ring,
int reg, u32 value)
{
ring->status_page.page_addr[reg] = value;
@@ -275,27 +285,29 @@
#define I915_GEM_HWS_SCRATCH_INDEX 0x30
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
-void intel_stop_ring_buffer(struct intel_ring_buffer *ring);
-void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
+void intel_stop_ring_buffer(struct intel_engine_cs *ring);
+void intel_cleanup_ring_buffer(struct intel_engine_cs *ring);
-int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);
-int __must_check intel_ring_cacheline_align(struct intel_ring_buffer *ring);
-static inline void intel_ring_emit(struct intel_ring_buffer *ring,
+int __must_check intel_ring_begin(struct intel_engine_cs *ring, int n);
+int __must_check intel_ring_cacheline_align(struct intel_engine_cs *ring);
+static inline void intel_ring_emit(struct intel_engine_cs *ring,
u32 data)
{
- iowrite32(data, ring->virtual_start + ring->tail);
- ring->tail += 4;
+ struct intel_ringbuffer *ringbuf = ring->buffer;
+ iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
+ ringbuf->tail += 4;
}
-static inline void intel_ring_advance(struct intel_ring_buffer *ring)
+static inline void intel_ring_advance(struct intel_engine_cs *ring)
{
- ring->tail &= ring->size - 1;
+ struct intel_ringbuffer *ringbuf = ring->buffer;
+ ringbuf->tail &= ringbuf->size - 1;
}
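/* Typical use of the inlines above, sketched from the begin/emit/advance
 * pattern the driver follows (emit_two_noops() itself is illustrative,
 * not part of the patch):
 */
static int emit_two_noops(struct intel_engine_cs *ring)
{
	int ret;

	ret = intel_ring_begin(ring, 2);	/* reserve 2 dwords */
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);		/* wrap tail into [0, size) */
	return 0;
}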
-void __intel_ring_advance(struct intel_ring_buffer *ring);
+void __intel_ring_advance(struct intel_engine_cs *ring);
-int __must_check intel_ring_idle(struct intel_ring_buffer *ring);
-void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno);
-int intel_ring_flush_all_caches(struct intel_ring_buffer *ring);
-int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring);
+int __must_check intel_ring_idle(struct intel_engine_cs *ring);
+void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno);
+int intel_ring_flush_all_caches(struct intel_engine_cs *ring);
+int intel_ring_invalidate_all_caches(struct intel_engine_cs *ring);
int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
@@ -303,21 +315,21 @@
int intel_init_blt_ring_buffer(struct drm_device *dev);
int intel_init_vebox_ring_buffer(struct drm_device *dev);
-u64 intel_ring_get_active_head(struct intel_ring_buffer *ring);
-void intel_ring_setup_status_page(struct intel_ring_buffer *ring);
+u64 intel_ring_get_active_head(struct intel_engine_cs *ring);
+void intel_ring_setup_status_page(struct intel_engine_cs *ring);
-static inline u32 intel_ring_get_tail(struct intel_ring_buffer *ring)
+static inline u32 intel_ring_get_tail(struct intel_engine_cs *ring)
{
- return ring->tail;
+ return ring->buffer->tail;
}
-static inline u32 intel_ring_get_seqno(struct intel_ring_buffer *ring)
+static inline u32 intel_ring_get_seqno(struct intel_engine_cs *ring)
{
BUG_ON(ring->outstanding_lazy_seqno == 0);
return ring->outstanding_lazy_seqno;
}
-static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno)
+static inline void i915_trace_irq_get(struct intel_engine_cs *ring, u32 seqno)
{
if (ring->trace_irq_seqno == 0 && ring->irq_get(ring))
ring->trace_irq_seqno = seqno;
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 2bf09e8..6a4d5bc 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1153,20 +1153,21 @@
pipe_config->pixel_multiplier =
intel_sdvo_get_pixel_multiplier(adjusted_mode);
+ pipe_config->has_hdmi_sink = intel_sdvo->has_hdmi_monitor;
+
if (intel_sdvo->color_range_auto) {
/* See CEA-861-E - 5.1 Default Encoding Parameters */
/* FIXME: This bit is only valid when using TMDS encoding and 8
* bit per color mode. */
- if (intel_sdvo->has_hdmi_monitor &&
+ if (pipe_config->has_hdmi_sink &&
drm_match_cea_mode(adjusted_mode) > 1)
- intel_sdvo->color_range = HDMI_COLOR_RANGE_16_235;
- else
- intel_sdvo->color_range = 0;
+ pipe_config->limited_color_range = true;
+ } else {
+ if (pipe_config->has_hdmi_sink &&
+ intel_sdvo->color_range == HDMI_COLOR_RANGE_16_235)
+ pipe_config->limited_color_range = true;
}
- if (intel_sdvo->color_range)
- pipe_config->limited_color_range = true;
-
/* Clock computation needs to happen after pixel multiplier. */
if (intel_sdvo->is_tv)
i9xx_adjust_sdvo_tv_clock(pipe_config);
@@ -1223,7 +1224,7 @@
if (!intel_sdvo_set_target_input(intel_sdvo))
return;
- if (intel_sdvo->has_hdmi_monitor) {
+ if (crtc->config.has_hdmi_sink) {
intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI);
intel_sdvo_set_colorimetry(intel_sdvo,
SDVO_COLORIMETRY_RGB256);
@@ -1258,8 +1259,8 @@
/* The real mode polarity is set by the SDVO commands, using
* struct intel_sdvo_dtd. */
sdvox = SDVO_VSYNC_ACTIVE_HIGH | SDVO_HSYNC_ACTIVE_HIGH;
- if (!HAS_PCH_SPLIT(dev) && intel_sdvo->is_hdmi)
- sdvox |= intel_sdvo->color_range;
+ if (!HAS_PCH_SPLIT(dev) && crtc->config.limited_color_range)
+ sdvox |= HDMI_COLOR_RANGE_16_235;
if (INTEL_INFO(dev)->gen < 5)
sdvox |= SDVO_BORDER_ENABLE;
} else {
@@ -1349,6 +1350,8 @@
u8 val;
bool ret;
+ sdvox = I915_READ(intel_sdvo->sdvo_reg);
+
ret = intel_sdvo_get_input_timing(intel_sdvo, &dtd);
if (!ret) {
/* Some sdvo encoders are not spec compliant and don't
@@ -1377,7 +1380,6 @@
* other platforms.
*/
if (IS_I915G(dev) || IS_I915GM(dev)) {
- sdvox = I915_READ(intel_sdvo->sdvo_reg);
pipe_config->pixel_multiplier =
((sdvox & SDVO_PORT_MULTIPLY_MASK)
>> SDVO_PORT_MULTIPLY_SHIFT) + 1;
@@ -1406,6 +1408,15 @@
}
}
+ if (sdvox & HDMI_COLOR_RANGE_16_235)
+ pipe_config->limited_color_range = true;
+
+ if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ENCODE,
+ &val, 1)) {
+ if (val == SDVO_ENCODE_HDMI)
+ pipe_config->has_hdmi_sink = true;
+ }
+
WARN(encoder_pixel_multiplier != pipe_config->pixel_multiplier,
"SDVO pixel multiplier mismatch, port: %i, encoder: %i\n",
pipe_config->pixel_multiplier, encoder_pixel_multiplier);
@@ -1732,7 +1743,7 @@
enum drm_connector_status ret;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
- connector->base.id, drm_get_connector_name(connector));
+ connector->base.id, connector->name);
if (!intel_sdvo_get_value(intel_sdvo,
SDVO_CMD_GET_ATTACHED_DISPLAYS,
@@ -1794,7 +1805,7 @@
struct edid *edid;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
- connector->base.id, drm_get_connector_name(connector));
+ connector->base.id, connector->name);
/* set the bus switch and get the modes */
edid = intel_sdvo_get_edid(connector);
@@ -1892,7 +1903,7 @@
int i;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
- connector->base.id, drm_get_connector_name(connector));
+ connector->base.id, connector->name);
/* Read the list of supported input resolutions for the selected TV
* format.
@@ -1929,7 +1940,7 @@
struct drm_display_mode *newmode;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
- connector->base.id, drm_get_connector_name(connector));
+ connector->base.id, connector->name);
/*
* Fetch modes from VBT. For SDVO prefer the VBT mode since some
diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c
index b1a5514..01d841e 100644
--- a/drivers/gpu/drm/i915/intel_sideband.c
+++ b/drivers/gpu/drm/i915/intel_sideband.c
@@ -29,12 +29,21 @@
* IOSF sideband, see VLV2_SidebandMsg_HAS.docx and
* VLV_VLV2_PUNIT_HAS_0.8.docx
*/
+
+/* Standard MMIO read, non-posted */
+#define SB_MRD_NP 0x00
+/* Standard MMIO write, non-posted */
+#define SB_MWR_NP 0x01
+/* Private register read, double-word addressing, non-posted */
+#define SB_CRRDDA_NP 0x06
+/* Private register write, double-word addressing, non-posted */
+#define SB_CRWRDA_NP 0x07
+
static int vlv_sideband_rw(struct drm_i915_private *dev_priv, u32 devfn,
u32 port, u32 opcode, u32 addr, u32 *val)
{
u32 cmd, be = 0xf, bar = 0;
- bool is_read = (opcode == PUNIT_OPCODE_REG_READ ||
- opcode == DPIO_OPCODE_REG_READ);
+ bool is_read = (opcode == SB_MRD_NP || opcode == SB_CRRDDA_NP);
cmd = (devfn << IOSF_DEVFN_SHIFT) | (opcode << IOSF_OPCODE_SHIFT) |
(port << IOSF_PORT_SHIFT) | (be << IOSF_BYTE_ENABLES_SHIFT) |
@@ -74,7 +83,7 @@
mutex_lock(&dev_priv->dpio_lock);
vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_PUNIT,
- PUNIT_OPCODE_REG_READ, addr, &val);
+ SB_CRRDDA_NP, addr, &val);
mutex_unlock(&dev_priv->dpio_lock);
return val;
@@ -86,7 +95,7 @@
mutex_lock(&dev_priv->dpio_lock);
vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_PUNIT,
- PUNIT_OPCODE_REG_WRITE, addr, &val);
+ SB_CRWRDA_NP, addr, &val);
mutex_unlock(&dev_priv->dpio_lock);
}
@@ -95,7 +104,7 @@
u32 val = 0;
vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_BUNIT,
- PUNIT_OPCODE_REG_READ, reg, &val);
+ SB_CRRDDA_NP, reg, &val);
return val;
}
@@ -103,7 +112,7 @@
void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
{
vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_BUNIT,
- PUNIT_OPCODE_REG_WRITE, reg, &val);
+ SB_CRWRDA_NP, reg, &val);
}
u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr)
@@ -114,7 +123,7 @@
mutex_lock(&dev_priv->dpio_lock);
vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_NC,
- PUNIT_OPCODE_REG_READ, addr, &val);
+ SB_CRRDDA_NP, addr, &val);
mutex_unlock(&dev_priv->dpio_lock);
return val;
@@ -124,56 +133,56 @@
{
u32 val = 0;
vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPIO_NC,
- PUNIT_OPCODE_REG_READ, reg, &val);
+ SB_CRRDDA_NP, reg, &val);
return val;
}
void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
{
vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPIO_NC,
- PUNIT_OPCODE_REG_WRITE, reg, &val);
+ SB_CRWRDA_NP, reg, &val);
}
u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg)
{
u32 val = 0;
vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCK,
- PUNIT_OPCODE_REG_READ, reg, &val);
+ SB_CRRDDA_NP, reg, &val);
return val;
}
void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
{
vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCK,
- PUNIT_OPCODE_REG_WRITE, reg, &val);
+ SB_CRWRDA_NP, reg, &val);
}
u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg)
{
u32 val = 0;
vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCU,
- PUNIT_OPCODE_REG_READ, reg, &val);
+ SB_CRRDDA_NP, reg, &val);
return val;
}
void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
{
vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCU,
- PUNIT_OPCODE_REG_WRITE, reg, &val);
+ SB_CRWRDA_NP, reg, &val);
}
u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg)
{
u32 val = 0;
vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPS_CORE,
- PUNIT_OPCODE_REG_READ, reg, &val);
+ SB_CRRDDA_NP, reg, &val);
return val;
}
void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
{
vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPS_CORE,
- PUNIT_OPCODE_REG_WRITE, reg, &val);
+ SB_CRWRDA_NP, reg, &val);
}
u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg)
@@ -181,7 +190,7 @@
u32 val = 0;
vlv_sideband_rw(dev_priv, DPIO_DEVFN, DPIO_PHY_IOSF_PORT(DPIO_PHY(pipe)),
- DPIO_OPCODE_REG_READ, reg, &val);
+ SB_MRD_NP, reg, &val);
/*
* FIXME: There might be some registers where all 1's is a valid value,
@@ -196,7 +205,7 @@
void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val)
{
vlv_sideband_rw(dev_priv, DPIO_DEVFN, DPIO_PHY_IOSF_PORT(DPIO_PHY(pipe)),
- DPIO_OPCODE_REG_WRITE, reg, &val);
+ SB_MWR_NP, reg, &val);
}
/* SBI access */
@@ -261,13 +270,13 @@
u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg)
{
u32 val = 0;
- vlv_sideband_rw(dev_priv, DPIO_DEVFN, IOSF_PORT_FLISDSI,
- DPIO_OPCODE_REG_READ, reg, &val);
+ vlv_sideband_rw(dev_priv, DPIO_DEVFN, IOSF_PORT_FLISDSI, SB_CRRDDA_NP,
+ reg, &val);
return val;
}
void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
{
- vlv_sideband_rw(dev_priv, DPIO_DEVFN, IOSF_PORT_FLISDSI,
- DPIO_OPCODE_REG_WRITE, reg, &val);
+ vlv_sideband_rw(dev_priv, DPIO_DEVFN, IOSF_PORT_FLISDSI, SB_CRWRDA_NP,
+ reg, &val);
}
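/* Every per-unit accessor in this file reduces to the same shape,
 * differing only in IOSF port and opcode pair: SB_CRRDDA_NP/SB_CRWRDA_NP
 * for the private-register units, SB_MRD_NP/SB_MWR_NP for the MMIO-style
 * DPIO target. Illustrative sketch (vlv_unit_read() is a stand-in, not
 * part of the patch):
 */
static u32 vlv_unit_read(struct drm_i915_private *dev_priv, u32 port, u32 reg)
{
	u32 val = 0;

	vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), port,
			SB_CRRDDA_NP, reg, &val);
	return val;
}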
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 213cd58..d6acd6b 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -696,10 +696,7 @@
* when going from primary only to sprite only and vice
* versa.
*/
- if (intel_crtc->config.ips_enabled) {
- intel_wait_for_vblank(dev, intel_crtc->pipe);
- hsw_enable_ips(intel_crtc);
- }
+ hsw_enable_ips(intel_crtc);
mutex_lock(&dev->struct_mutex);
intel_update_fbc(dev);
@@ -1021,6 +1018,9 @@
intel_crtc->primary_enabled = primary_enabled;
+ if (primary_was_enabled != primary_enabled)
+ intel_crtc_wait_for_pending_flips(crtc);
+
if (primary_was_enabled && !primary_enabled)
intel_pre_disable_primary(crtc);
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index e0193e8..25850a8 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1314,7 +1314,7 @@
int type;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] force=%d\n",
- connector->base.id, drm_get_connector_name(connector),
+ connector->base.id, connector->name,
force);
mode = reported_modes[0];
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 76dc185..9cd99d9 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -921,7 +921,7 @@
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_reset_stats *args = data;
struct i915_ctx_hang_stats *hs;
- struct i915_hw_context *ctx;
+ struct intel_context *ctx;
int ret;
if (args->flags || args->pad)
@@ -976,7 +976,6 @@
if (ret)
return ret;
- /* We can't reset render&media without also resetting display ... */
pci_write_config_byte(dev->pdev, I965_GDRST,
GRDOM_MEDIA | GRDOM_RESET_ENABLE);
@@ -989,26 +988,58 @@
return 0;
}
-static int ironlake_do_reset(struct drm_device *dev)
+static int g4x_do_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 gdrst;
int ret;
- gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
- gdrst &= ~GRDOM_MASK;
- I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
- gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE);
- ret = wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
+ pci_write_config_byte(dev->pdev, I965_GDRST,
+ GRDOM_RENDER | GRDOM_RESET_ENABLE);
+ ret = wait_for(i965_reset_complete(dev), 500);
if (ret)
return ret;
- /* We can't reset render&media without also resetting display ... */
- gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
- gdrst &= ~GRDOM_MASK;
+ /* WaVcpClkGateDisableForMediaReset:ctg,elk */
+ I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
+ POSTING_READ(VDECCLK_GATE_D);
+
+ pci_write_config_byte(dev->pdev, I965_GDRST,
+ GRDOM_MEDIA | GRDOM_RESET_ENABLE);
+ ret = wait_for(i965_reset_complete(dev), 500);
+ if (ret)
+ return ret;
+
+ /* WaVcpClkGateDisableForMediaReset:ctg,elk */
+ I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
+ POSTING_READ(VDECCLK_GATE_D);
+
+ pci_write_config_byte(dev->pdev, I965_GDRST, 0);
+
+ return 0;
+}
+
+static int ironlake_do_reset(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
- gdrst | GRDOM_MEDIA | GRDOM_RESET_ENABLE);
- return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
+ ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
+ ret = wait_for((I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) &
+ ILK_GRDOM_RESET_ENABLE) == 0, 500);
+ if (ret)
+ return ret;
+
+ I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
+ ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
+ ret = wait_for((I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) &
+ ILK_GRDOM_RESET_ENABLE) == 0, 500);
+ if (ret)
+ return ret;
+
+ I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, 0);
+
+ return 0;
}
static int gen6_do_reset(struct drm_device *dev)
@@ -1039,7 +1070,11 @@
case 7:
case 6: return gen6_do_reset(dev);
case 5: return ironlake_do_reset(dev);
- case 4: return i965_do_reset(dev);
+ case 4:
+ if (IS_G4X(dev))
+ return g4x_do_reset(dev);
+ else
+ return i965_do_reset(dev);
default: return -ENODEV;
}
}
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dac.c b/drivers/gpu/drm/nouveau/dispnv04/dac.c
index 434b920..a96dda4 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dac.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dac.c
@@ -414,7 +414,7 @@
helper->dpms(encoder, DRM_MODE_DPMS_ON);
NV_DEBUG(drm, "Output %s is running on CRTC %d using output %c\n",
- drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base),
+ nouveau_encoder_connector_get(nv_encoder)->base.name,
nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
}
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dfp.c b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
index a2d669b..e57babb 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dfp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
@@ -477,7 +477,7 @@
helper->dpms(encoder, DRM_MODE_DPMS_ON);
NV_DEBUG(drm, "Output %s is running on CRTC %d using output %c\n",
- drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base),
+ nouveau_encoder_connector_get(nv_encoder)->base.name,
nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
}
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c
index 2f1ed61..4342fda 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c
@@ -115,7 +115,7 @@
&dev->mode_config.connector_list, head) {
if (!connector->encoder_ids[0]) {
NV_WARN(drm, "%s has no encoders, removing\n",
- drm_get_connector_name(connector));
+ connector->name);
connector->funcs->destroy(connector);
}
}
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
index 244822d..8667620 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
@@ -171,7 +171,8 @@
helper->dpms(encoder, DRM_MODE_DPMS_ON);
NV_DEBUG(drm, "Output %s is running on CRTC %d using output %c\n",
- drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base), nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
+ nouveau_encoder_connector_get(nv_encoder)->base.name,
+ nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
}
static void nv04_tv_destroy(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
index acef48f..195bd8e 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
@@ -612,8 +612,7 @@
helper->dpms(encoder, DRM_MODE_DPMS_ON);
NV_INFO(drm, "Output %s is running on CRTC %d using output %c\n",
- drm_get_connector_name(
- &nouveau_encoder_connector_get(nv_encoder)->base),
+ nouveau_encoder_connector_get(nv_encoder)->base.name,
nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index d07ce02..6ecea9b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -265,14 +265,14 @@
nv_connector->edid);
if (!nv_connector->edid) {
NV_ERROR(drm, "DDC responded, but no EDID for %s\n",
- drm_get_connector_name(connector));
+ connector->name);
goto detect_analog;
}
if (nv_encoder->dcb->type == DCB_OUTPUT_DP &&
!nouveau_dp_detect(to_drm_encoder(nv_encoder))) {
NV_ERROR(drm, "Detected %s, but failed init\n",
- drm_get_connector_name(connector));
+ connector->name);
conn_status = connector_status_disconnected;
goto out;
}
@@ -437,7 +437,7 @@
nv_encoder = find_encoder(connector, type);
if (!nv_encoder) {
NV_ERROR(drm, "can't find encoder to force %s on!\n",
- drm_get_connector_name(connector));
+ connector->name);
connector->status = connector_status_disconnected;
return;
}
@@ -923,7 +923,7 @@
bool plugged = gpio->get(gpio, 0, nv_connector->hpd.func, 0xff);
NV_DEBUG(drm, "%splugged %s\n", plugged ? "" : "un",
- drm_get_connector_name(connector));
+ connector->name);
if (plugged)
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 58af547..79f6dc7 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -2295,7 +2295,7 @@
continue;
NV_WARN(drm, "%s has no encoders, removing\n",
- drm_get_connector_name(connector));
+ connector->name);
connector->funcs->destroy(connector);
}
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c
index 8b01960..2a5cacd 100644
--- a/drivers/gpu/drm/omapdrm/omap_fb.c
+++ b/drivers/gpu/drm/omapdrm/omap_fb.c
@@ -346,6 +346,7 @@
VERB("flush: %d,%d %dx%d, fb=%p", x, y, w, h, fb);
+ /* FIXME: This is racy - no protection against modeset config changes. */
while ((connector = omap_framebuffer_get_next_connector(fb, connector))) {
/* only consider connectors that are part of a chain */
if (connector->encoder && connector->encoder->crtc) {
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 3ab9072..5d7ea24 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -574,6 +574,10 @@
bo->surf.height, bo->surf.stride, bo->surf.format);
qxl_io_create_primary(qdev, base_offset, bo);
bo->is_primary = true;
+ }
+
+ if (bo->is_primary) {
+ DRM_DEBUG_KMS("setting surface_id to 0 for primary surface %d on crtc %d\n", bo->surface_id, qcrtc->index);
surf_id = 0;
} else {
surf_id = bo->surface_id;
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index 0943353..dbcbfe8 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -72,7 +72,7 @@
radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \
rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \
r200.o radeon_legacy_tv.o r600_cs.o r600_blit_shaders.o \
- radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \
+ radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o dce3_1_afmt.o \
evergreen.o evergreen_cs.o evergreen_blit_shaders.o \
evergreen_hdmi.o radeon_trace_points.o ni.o cayman_blit_shaders.o \
atombios_encoders.o radeon_semaphore.o radeon_sa.o atombios_i2c.o si.o \
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index fb187c7..967d193 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -559,6 +559,7 @@
u32 adjusted_clock = mode->clock;
int encoder_mode = atombios_get_encoder_mode(encoder);
u32 dp_clock = mode->clock;
+ u32 clock = mode->clock;
int bpc = radeon_crtc->bpc;
bool is_duallink = radeon_dig_monitor_is_duallink(encoder, mode->clock);
@@ -634,6 +635,24 @@
radeon_crtc->pll_flags |= RADEON_PLL_USE_REF_DIV;
}
+ /* adjust pll for deep color modes */
+ if (encoder_mode == ATOM_ENCODER_MODE_HDMI) {
+ switch (bpc) {
+ case 8:
+ default:
+ break;
+ case 10:
+ clock = (clock * 5) / 4;
+ break;
+ case 12:
+ clock = (clock * 3) / 2;
+ break;
+ case 16:
+ clock = clock * 2;
+ break;
+ }
+ }
+
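+ /* Worked example for the adjustment above: a 148500 kHz 1080p mode
+  * at 12 bpc carries 36 bits per pixel instead of 24, so the TMDS
+  * clock becomes 148500 * 3 / 2 = 222750 kHz. */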
/* DCE3+ has an AdjustDisplayPll that will adjust the pixel clock
* accordingly based on the encoder/transmitter to work around
* special hw requirements.
@@ -655,7 +674,7 @@
switch (crev) {
case 1:
case 2:
- args.v1.usPixelClock = cpu_to_le16(mode->clock / 10);
+ args.v1.usPixelClock = cpu_to_le16(clock / 10);
args.v1.ucTransmitterID = radeon_encoder->encoder_id;
args.v1.ucEncodeMode = encoder_mode;
if (radeon_crtc->ss_enabled && radeon_crtc->ss.percentage)
@@ -667,7 +686,7 @@
adjusted_clock = le16_to_cpu(args.v1.usPixelClock) * 10;
break;
case 3:
- args.v3.sInput.usPixelClock = cpu_to_le16(mode->clock / 10);
+ args.v3.sInput.usPixelClock = cpu_to_le16(clock / 10);
args.v3.sInput.ucTransmitterID = radeon_encoder->encoder_id;
args.v3.sInput.ucEncodeMode = encoder_mode;
args.v3.sInput.ucDispPllConfig = 0;
@@ -681,10 +700,6 @@
args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10);
} else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- if (encoder_mode == ATOM_ENCODER_MODE_HDMI)
- /* deep color support */
- args.v3.sInput.usPixelClock =
- cpu_to_le16((mode->clock * bpc / 8) / 10);
if (dig->coherent_mode)
args.v3.sInput.ucDispPllConfig |=
DISPPLL_CONFIG_COHERENT_MODE;
@@ -864,14 +879,21 @@
args.v5.ucMiscInfo = 0; /* HDMI depth, etc. */
if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK))
args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_REF_DIV_SRC;
- switch (bpc) {
- case 8:
- default:
- args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_24BPP;
- break;
- case 10:
- args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_30BPP;
- break;
+ if (encoder_mode == ATOM_ENCODER_MODE_HDMI) {
+ switch (bpc) {
+ case 8:
+ default:
+ args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_24BPP;
+ break;
+ case 10:
+ /* yes this is correct, the atom define is wrong */
+ args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_32BPP;
+ break;
+ case 12:
+ /* yes this is correct, the atom define is wrong */
+ args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_30BPP;
+ break;
+ }
}
args.v5.ucTransmitterID = encoder_id;
args.v5.ucEncoderMode = encoder_mode;
@@ -886,20 +908,22 @@
args.v6.ucMiscInfo = 0; /* HDMI depth, etc. */
if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK))
args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_REF_DIV_SRC;
- switch (bpc) {
- case 8:
- default:
- args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_24BPP;
- break;
- case 10:
- args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_30BPP;
- break;
- case 12:
- args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_36BPP;
- break;
- case 16:
- args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_48BPP;
- break;
+ if (encoder_mode == ATOM_ENCODER_MODE_HDMI) {
+ switch (bpc) {
+ case 8:
+ default:
+ args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_24BPP;
+ break;
+ case 10:
+ args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_30BPP_V6;
+ break;
+ case 12:
+ args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_36BPP_V6;
+ break;
+ case 16:
+ args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_48BPP;
+ break;
+ }
}
args.v6.ucTransmitterID = encoder_id;
args.v6.ucEncoderMode = encoder_mode;
@@ -1021,10 +1045,17 @@
struct radeon_encoder *radeon_encoder =
to_radeon_encoder(radeon_crtc->encoder);
u32 pll_clock = mode->clock;
+ u32 clock = mode->clock;
u32 ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0;
struct radeon_pll *pll;
int encoder_mode = atombios_get_encoder_mode(radeon_crtc->encoder);
+ /* pass the actual clock to atombios_crtc_program_pll for DCE5,6 for HDMI */
+ if (ASIC_IS_DCE5(rdev) && !ASIC_IS_DCE8(rdev) &&
+ (encoder_mode == ATOM_ENCODER_MODE_HDMI) &&
+ (radeon_crtc->bpc > 8))
+ clock = radeon_crtc->adjusted_clock;
+
switch (radeon_crtc->pll_id) {
case ATOM_PPLL1:
pll = &rdev->clock.p1pll;
@@ -1059,7 +1090,7 @@
radeon_crtc->crtc_id, &radeon_crtc->ss);
atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
- encoder_mode, radeon_encoder->encoder_id, mode->clock,
+ encoder_mode, radeon_encoder->encoder_id, clock,
ref_div, fb_div, frac_fb_div, post_div,
radeon_crtc->bpc, radeon_crtc->ss_enabled, &radeon_crtc->ss);
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index bc0119f..225f6c6 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -95,9 +95,12 @@
int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction);
unsigned char *base;
int recv_bytes;
+ int r = 0;
memset(&args, 0, sizeof(args));
+ mutex_lock(&chan->mutex);
+
base = (unsigned char *)(rdev->mode_info.atom_context->scratch + 1);
radeon_atom_copy_swap(base, send, send_bytes, true);
@@ -117,19 +120,22 @@
/* timeout */
if (args.v1.ucReplyStatus == 1) {
DRM_DEBUG_KMS("dp_aux_ch timeout\n");
- return -ETIMEDOUT;
+ r = -ETIMEDOUT;
+ goto done;
}
/* flags not zero */
if (args.v1.ucReplyStatus == 2) {
DRM_DEBUG_KMS("dp_aux_ch flags not zero\n");
- return -EBUSY;
+ r = -EBUSY;
+ goto done;
}
/* error */
if (args.v1.ucReplyStatus == 3) {
DRM_DEBUG_KMS("dp_aux_ch error\n");
- return -EIO;
+ r = -EIO;
+ goto done;
}
recv_bytes = args.v1.ucDataOutLen;
@@ -139,7 +145,11 @@
if (recv && recv_size)
radeon_atom_copy_swap(recv, base + 16, recv_bytes, false);
- return recv_bytes;
+ r = recv_bytes;
+done:
+ mutex_unlock(&chan->mutex);
+
+ return r;
}
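/* The conversion above serializes the whole AUX transaction under
 * chan->mutex and funnels every early exit through a single unlock
 * site. Minimal shape of the pattern (illustrative; locked_op() and
 * something_failed() are stand-ins, not part of the patch):
 */
static int locked_op(struct mutex *lock)
{
	int r = 0;

	mutex_lock(lock);
	if (something_failed()) {
		r = -EIO;
		goto done;
	}
	/* ... do the guarded work under the lock ... */
done:
	mutex_unlock(lock);
	return r;
}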
#define BARE_ADDRESS_SIZE 3
@@ -281,6 +291,19 @@
/***** radeon specific DP functions *****/
+static int radeon_dp_get_max_link_rate(struct drm_connector *connector,
+ u8 dpcd[DP_DPCD_SIZE])
+{
+ int max_link_rate;
+
+ if (radeon_connector_is_dp12_capable(connector))
+ max_link_rate = min(drm_dp_max_link_rate(dpcd), 540000);
+ else
+ max_link_rate = min(drm_dp_max_link_rate(dpcd), 270000);
+
+ return max_link_rate;
+}
+
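+/* Sketch of the policy above: DPCD link-rate codes map to 162000 (RBR),
+ * 270000 (HBR) and 540000 (HBR2) in the units used here, and HBR2 is
+ * only usable when the hw is DP 1.2 capable, so the advertised rate is
+ * clamped to HBR everywhere else. */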
/* First get the min lane# when low rate is used according to pixel clock
* (prefer low rate), second check max lane# supported by DP panel,
* if the max lane# < low rate lane# then use max lane# instead.
@@ -290,7 +313,7 @@
int pix_clock)
{
int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector));
- int max_link_rate = drm_dp_max_link_rate(dpcd);
+ int max_link_rate = radeon_dp_get_max_link_rate(connector, dpcd);
int max_lane_num = drm_dp_max_lane_count(dpcd);
int lane_num;
int max_dp_pix_clock;
@@ -328,7 +351,7 @@
return 540000;
}
- return drm_dp_max_link_rate(dpcd);
+ return radeon_dp_get_max_link_rate(connector, dpcd);
}
static u8 radeon_dp_encoder_service(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index e6eb509..2b29084 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -1884,8 +1884,11 @@
args.v2.ucEncodeMode = ATOM_ENCODER_MODE_CRT;
else
args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder);
- } else
+ } else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+ args.v2.ucEncodeMode = ATOM_ENCODER_MODE_LVDS;
+ } else {
args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder);
+ }
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
diff --git a/drivers/gpu/drm/radeon/atombios_i2c.c b/drivers/gpu/drm/radeon/atombios_i2c.c
index b5162c3..9c570fb 100644
--- a/drivers/gpu/drm/radeon/atombios_i2c.c
+++ b/drivers/gpu/drm/radeon/atombios_i2c.c
@@ -43,15 +43,19 @@
int index = GetIndexIntoMasterTable(COMMAND, ProcessI2cChannelTransaction);
unsigned char *base;
u16 out = cpu_to_le16(0);
+ int r = 0;
memset(&args, 0, sizeof(args));
+ mutex_lock(&chan->mutex);
+
base = (unsigned char *)rdev->mode_info.atom_context->scratch;
if (flags & HW_I2C_WRITE) {
if (num > ATOM_MAX_HW_I2C_WRITE) {
DRM_ERROR("hw i2c: tried to write too many bytes (%d vs 3)\n", num);
- return -EINVAL;
+ r = -EINVAL;
+ goto done;
}
if (buf == NULL)
args.ucRegIndex = 0;
@@ -65,7 +69,8 @@
} else {
if (num > ATOM_MAX_HW_I2C_READ) {
DRM_ERROR("hw i2c: tried to read too many bytes (%d vs 255)\n", num);
- return -EINVAL;
+ r = -EINVAL;
+ goto done;
}
args.ucRegIndex = 0;
args.lpI2CDataOut = 0;
@@ -82,13 +87,17 @@
/* error */
if (args.ucStatus != HW_ASSISTED_I2C_STATUS_SUCCESS) {
DRM_DEBUG_KMS("hw_i2c error\n");
- return -EIO;
+ r = -EIO;
+ goto done;
}
if (!(flags & HW_I2C_WRITE))
radeon_atom_copy_swap(buf, base, num, false);
- return 0;
+done:
+ mutex_unlock(&chan->mutex);
+
+ return r;
}
int radeon_atom_hw_i2c_xfer(struct i2c_adapter *i2c_adap,
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 199eb19..a518140 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -5328,6 +5328,7 @@
WREG32(MC_VM_MX_L1_TLB_CNTL,
(0xA << 7) |
ENABLE_L1_TLB |
+ ENABLE_L1_FRAGMENT_PROCESSING |
SYSTEM_ACCESS_MODE_NOT_IN_SYS |
ENABLE_ADVANCED_DRIVER_MODEL |
SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
@@ -5340,7 +5341,8 @@
CONTEXT1_IDENTITY_ACCESS_MODE(1));
WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
- L2_CACHE_BIGK_FRAGMENT_SIZE(6));
+ BANK_SELECT(4) |
+ L2_CACHE_BIGK_FRAGMENT_SIZE(4));
/* setup context0 */
WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
@@ -5376,6 +5378,7 @@
(u32)(rdev->dummy_page.addr >> 12));
WREG32(VM_CONTEXT1_CNTL2, 4);
WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
+ PAGE_TABLE_BLOCK_SIZE(RADEON_VM_BLOCK_SIZE - 9) |
RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
@@ -7311,7 +7314,7 @@
wake_up(&rdev->irq.vblank_queue);
}
if (atomic_read(&rdev->irq.pflip[0]))
- radeon_crtc_handle_flip(rdev, 0);
+ radeon_crtc_handle_vblank(rdev, 0);
rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D1 vblank\n");
}
@@ -7337,7 +7340,7 @@
wake_up(&rdev->irq.vblank_queue);
}
if (atomic_read(&rdev->irq.pflip[1]))
- radeon_crtc_handle_flip(rdev, 1);
+ radeon_crtc_handle_vblank(rdev, 1);
rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D2 vblank\n");
}
@@ -7363,7 +7366,7 @@
wake_up(&rdev->irq.vblank_queue);
}
if (atomic_read(&rdev->irq.pflip[2]))
- radeon_crtc_handle_flip(rdev, 2);
+ radeon_crtc_handle_vblank(rdev, 2);
rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D3 vblank\n");
}
@@ -7389,7 +7392,7 @@
wake_up(&rdev->irq.vblank_queue);
}
if (atomic_read(&rdev->irq.pflip[3]))
- radeon_crtc_handle_flip(rdev, 3);
+ radeon_crtc_handle_vblank(rdev, 3);
rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D4 vblank\n");
}
@@ -7415,7 +7418,7 @@
wake_up(&rdev->irq.vblank_queue);
}
if (atomic_read(&rdev->irq.pflip[4]))
- radeon_crtc_handle_flip(rdev, 4);
+ radeon_crtc_handle_vblank(rdev, 4);
rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D5 vblank\n");
}
@@ -7441,7 +7444,7 @@
wake_up(&rdev->irq.vblank_queue);
}
if (atomic_read(&rdev->irq.pflip[5]))
- radeon_crtc_handle_flip(rdev, 5);
+ radeon_crtc_handle_vblank(rdev, 5);
rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D6 vblank\n");
}
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index f7e46cf..3c2407b 100644
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -741,7 +741,26 @@
trace_radeon_vm_set_page(pe, addr, count, incr, flags);
- if (flags & R600_PTE_SYSTEM) {
+ if (flags == R600_PTE_GART) {
+ uint64_t src = rdev->gart.table_addr + (addr >> 12) * 8;
+ while (count) {
+ unsigned bytes = count * 8;
+ if (bytes > 0x1FFFF8)
+ bytes = 0x1FFFF8;
+
+ ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
+ ib->ptr[ib->length_dw++] = bytes;
+ ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
+ ib->ptr[ib->length_dw++] = src & 0xffffffff;
+ ib->ptr[ib->length_dw++] = upper_32_bits(src);
+ ib->ptr[ib->length_dw++] = pe & 0xffffffff;
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+
+ pe += bytes;
+ src += bytes;
+ count -= bytes / 8;
+ }
+ } else if (flags & R600_PTE_SYSTEM) {
while (count) {
ndw = count * 2;
if (ndw > 0xFFFFE)
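/* The new GART branch above copies PTEs straight out of the GART table
 * with SDMA linear-copy packets. Each packet's byte count is capped at
 * 0x1FFFF8, the largest multiple of 8 that fits the packet's count
 * field, so a range of N bytes is split into DIV_ROUND_UP(N, 0x1FFFF8)
 * packets by the while loop. */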
diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h
index 2138732..0b27ea0 100644
--- a/drivers/gpu/drm/radeon/cikd.h
+++ b/drivers/gpu/drm/radeon/cikd.h
@@ -482,6 +482,7 @@
#define READ_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 16)
#define WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 18)
#define WRITE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 19)
+#define PAGE_TABLE_BLOCK_SIZE(x) (((x) & 0xF) << 24)
#define VM_CONTEXT1_CNTL 0x1414
#define VM_CONTEXT0_CNTL2 0x1430
#define VM_CONTEXT1_CNTL2 0x1434
diff --git a/drivers/gpu/drm/radeon/clearstate_cayman.h b/drivers/gpu/drm/radeon/clearstate_cayman.h
index aa908c5..e48a140 100644
--- a/drivers/gpu/drm/radeon/clearstate_cayman.h
+++ b/drivers/gpu/drm/radeon/clearstate_cayman.h
@@ -1050,7 +1050,7 @@
{SECT_CONTEXT_def_5, 0x0000a29e, 5 },
{SECT_CONTEXT_def_6, 0x0000a2a5, 56 },
{SECT_CONTEXT_def_7, 0x0000a2de, 290 },
- { 0, 0, 0 }
+ { NULL, 0, 0 }
};
static const u32 SECT_CLEAR_def_1[] =
{
@@ -1061,7 +1061,7 @@
static const struct cs_extent_def SECT_CLEAR_defs[] =
{
{SECT_CLEAR_def_1, 0x0000ffc0, 3 },
- { 0, 0, 0 }
+ { NULL, 0, 0 }
};
static const u32 SECT_CTRLCONST_def_1[] =
{
@@ -1071,11 +1071,11 @@
static const struct cs_extent_def SECT_CTRLCONST_defs[] =
{
{SECT_CTRLCONST_def_1, 0x0000f3fc, 2 },
- { 0, 0, 0 }
+ { NULL, 0, 0 }
};
static const struct cs_section_def cayman_cs_data[] = {
{ SECT_CONTEXT_defs, SECT_CONTEXT },
{ SECT_CLEAR_defs, SECT_CLEAR },
{ SECT_CTRLCONST_defs, SECT_CTRLCONST },
- { 0, SECT_NONE }
+ { NULL, SECT_NONE }
};
diff --git a/drivers/gpu/drm/radeon/clearstate_ci.h b/drivers/gpu/drm/radeon/clearstate_ci.h
index c3982f9..f55d066 100644
--- a/drivers/gpu/drm/radeon/clearstate_ci.h
+++ b/drivers/gpu/drm/radeon/clearstate_ci.h
@@ -936,9 +936,9 @@
{ci_SECT_CONTEXT_def_5, 0x0000a2a0, 2 },
{ci_SECT_CONTEXT_def_6, 0x0000a2a3, 1 },
{ci_SECT_CONTEXT_def_7, 0x0000a2a5, 233 },
- { 0, 0, 0 }
+ { NULL, 0, 0 }
};
static const struct cs_section_def ci_cs_data[] = {
{ ci_SECT_CONTEXT_defs, SECT_CONTEXT },
- { 0, SECT_NONE }
+ { NULL, SECT_NONE }
};
diff --git a/drivers/gpu/drm/radeon/clearstate_si.h b/drivers/gpu/drm/radeon/clearstate_si.h
index b994cb2..66e39cd 100644
--- a/drivers/gpu/drm/radeon/clearstate_si.h
+++ b/drivers/gpu/drm/radeon/clearstate_si.h
@@ -933,9 +933,9 @@
{si_SECT_CONTEXT_def_5, 0x0000a2a1, 1 },
{si_SECT_CONTEXT_def_6, 0x0000a2a3, 1 },
{si_SECT_CONTEXT_def_7, 0x0000a2a5, 233 },
- { 0, 0, 0 }
+ { NULL, 0, 0 }
};
static const struct cs_section_def si_cs_data[] = {
{ si_SECT_CONTEXT_defs, SECT_CONTEXT },
- { 0, SECT_NONE }
+ { NULL, SECT_NONE }
};
diff --git a/drivers/gpu/drm/radeon/dce3_1_afmt.c b/drivers/gpu/drm/radeon/dce3_1_afmt.c
new file mode 100644
index 0000000..51800e3
--- /dev/null
+++ b/drivers/gpu/drm/radeon/dce3_1_afmt.c
@@ -0,0 +1,244 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ * Copyright 2014 Rafał Miłecki
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <linux/hdmi.h>
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "r600d.h"
+
+static void dce3_2_afmt_write_speaker_allocation(struct drm_encoder *encoder)
+{
+ struct radeon_device *rdev = encoder->dev->dev_private;
+ struct drm_connector *connector;
+ struct radeon_connector *radeon_connector = NULL;
+ u32 tmp;
+ u8 *sadb;
+ int sad_count;
+
+ list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ if (connector->encoder == encoder) {
+ radeon_connector = to_radeon_connector(connector);
+ break;
+ }
+ }
+
+ if (!radeon_connector) {
+ DRM_ERROR("Couldn't find encoder's connector\n");
+ return;
+ }
+
+ sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb);
+ if (sad_count < 0) {
+ DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
+ return;
+ }
+
+ /* program the speaker allocation */
+ tmp = RREG32(AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER);
+ tmp &= ~(DP_CONNECTION | SPEAKER_ALLOCATION_MASK);
+ /* set HDMI mode */
+ tmp |= HDMI_CONNECTION;
+ if (sad_count)
+ tmp |= SPEAKER_ALLOCATION(sadb[0]);
+ else
+ tmp |= SPEAKER_ALLOCATION(5); /* stereo */
+ WREG32(AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER, tmp);
+
+ kfree(sadb);
+}
+
+static void dce3_2_afmt_write_sad_regs(struct drm_encoder *encoder)
+{
+ struct radeon_device *rdev = encoder->dev->dev_private;
+ struct drm_connector *connector;
+ struct radeon_connector *radeon_connector = NULL;
+ struct cea_sad *sads;
+ int i, sad_count;
+
+ static const u16 eld_reg_to_type[][2] = {
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
+ };
+
+ list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ if (connector->encoder == encoder) {
+ radeon_connector = to_radeon_connector(connector);
+ break;
+ }
+ }
+
+ if (!radeon_connector) {
+ DRM_ERROR("Couldn't find encoder's connector\n");
+ return;
+ }
+
+ sad_count = drm_edid_to_sad(radeon_connector->edid, &sads);
+ if (sad_count < 0) {
+ DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
+ return;
+ }
+ BUG_ON(!sads);
+
+ for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
+ u32 value = 0;
+ u8 stereo_freqs = 0;
+ int max_channels = -1;
+ int j;
+
+ for (j = 0; j < sad_count; j++) {
+ struct cea_sad *sad = &sads[j];
+
+ if (sad->format == eld_reg_to_type[i][1]) {
+ if (sad->channels > max_channels) {
+ value = MAX_CHANNELS(sad->channels) |
+ DESCRIPTOR_BYTE_2(sad->byte2) |
+ SUPPORTED_FREQUENCIES(sad->freq);
+ max_channels = sad->channels;
+ }
+
+ if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
+ stereo_freqs |= sad->freq;
+ else
+ break;
+ }
+ }
+
+ value |= SUPPORTED_FREQUENCIES_STEREO(stereo_freqs);
+
+ WREG32(eld_reg_to_type[i][0], value);
+ }
+
+ kfree(sads);
+}
+
+/*
+ * update the info frames with the data from the current display mode
+ */
+void dce3_1_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
+ struct hdmi_avi_infoframe frame;
+ uint32_t offset;
+ ssize_t err;
+
+ if (!dig || !dig->afmt)
+ return;
+
+ /* Silent, r600_hdmi_enable will raise WARN for us */
+ if (!dig->afmt->enabled)
+ return;
+ offset = dig->afmt->offset;
+
+ /* disable audio prior to setting up hw */
+ dig->afmt->pin = r600_audio_get_pin(rdev);
+ r600_audio_enable(rdev, dig->afmt->pin, false);
+
+ r600_audio_set_dto(encoder, mode->clock);
+
+ WREG32(HDMI0_VBI_PACKET_CONTROL + offset,
+ HDMI0_NULL_SEND); /* send null packets when required */
+
+ WREG32(HDMI0_AUDIO_CRC_CONTROL + offset, 0x1000);
+
+ if (ASIC_IS_DCE32(rdev)) {
+ WREG32(HDMI0_AUDIO_PACKET_CONTROL + offset,
+ HDMI0_AUDIO_DELAY_EN(1) | /* default audio delay */
+ HDMI0_AUDIO_PACKETS_PER_LINE(3)); /* should be sufficient for all audio modes and small enough for all hblanks */
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + offset,
+ AFMT_AUDIO_SAMPLE_SEND | /* send audio packets */
+ AFMT_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */
+ } else {
+ WREG32(HDMI0_AUDIO_PACKET_CONTROL + offset,
+ HDMI0_AUDIO_SAMPLE_SEND | /* send audio packets */
+ HDMI0_AUDIO_DELAY_EN(1) | /* default audio delay */
+ HDMI0_AUDIO_PACKETS_PER_LINE(3) | /* should be sufficient for all audio modes and small enough for all hblanks */
+ HDMI0_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */
+ }
+
+ if (ASIC_IS_DCE32(rdev)) {
+ dce3_2_afmt_write_speaker_allocation(encoder);
+ dce3_2_afmt_write_sad_regs(encoder);
+ }
+
+ WREG32(HDMI0_ACR_PACKET_CONTROL + offset,
+ HDMI0_ACR_SOURCE | /* select SW CTS value - XXX verify that hw CTS works on all families */
+ HDMI0_ACR_AUTO_SEND); /* allow hw to send ACR packets when required */
+
+ WREG32(HDMI0_VBI_PACKET_CONTROL + offset,
+ HDMI0_NULL_SEND | /* send null packets when required */
+ HDMI0_GC_SEND | /* send general control packets */
+ HDMI0_GC_CONT); /* send general control packets every frame */
+
+ /* TODO: HDMI0_AUDIO_INFO_UPDATE */
+ WREG32(HDMI0_INFOFRAME_CONTROL0 + offset,
+ HDMI0_AVI_INFO_SEND | /* enable AVI info frames */
+ HDMI0_AVI_INFO_CONT | /* send AVI info frames every frame/field */
+ HDMI0_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */
+ HDMI0_AUDIO_INFO_CONT); /* send audio info frames every frame/field */
+
+ WREG32(HDMI0_INFOFRAME_CONTROL1 + offset,
+ HDMI0_AVI_INFO_LINE(2) | /* anything other than 0 */
+ HDMI0_AUDIO_INFO_LINE(2)); /* anything other than 0 */
+
+ WREG32(HDMI0_GC + offset, 0); /* unset HDMI0_GC_AVMUTE */
+
+ err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
+ if (err < 0) {
+ DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
+ return;
+ }
+
+ err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
+ if (err < 0) {
+ DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
+ return;
+ }
+
+ r600_hdmi_update_avi_infoframe(encoder, buffer, sizeof(buffer));
+ r600_hdmi_update_ACR(encoder, mode->clock);
+
+ /* it's unknown what these bits do exactly, but they're indeed quite useful for debugging */
+ WREG32(HDMI0_RAMP_CONTROL0 + offset, 0x00FFFFFF);
+ WREG32(HDMI0_RAMP_CONTROL1 + offset, 0x007FFFFF);
+ WREG32(HDMI0_RAMP_CONTROL2 + offset, 0x00000001);
+ WREG32(HDMI0_RAMP_CONTROL3 + offset, 0x00000001);
+
+ r600_hdmi_audio_workaround(encoder);
+
+ /* enable audio after setting up hw */
+ r600_audio_enable(rdev, dig->afmt->pin, true);
+}
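
For readers new to the SAD handling copied into this file: dce3_2_afmt_write_sad_regs() programs one audio descriptor register per CEA coding type, keeping, for each type, the SAD advertising the most channels (the loop above additionally accumulates stereo frequency bits from PCM SADs and stops at the first non-PCM match). Factored out for clarity, the per-type selection amounts to this (hypothetical helper, not part of the patch):

    static u32 best_sad_for_type(const struct cea_sad *sads, int sad_count, u8 type)
    {
            u32 value = 0;
            int max_channels = -1;
            int j;

            for (j = 0; j < sad_count; j++) {
                    const struct cea_sad *sad = &sads[j];

                    if (sad->format != type)
                            continue;
                    /* prefer the descriptor advertising the most channels */
                    if (sad->channels > max_channels) {
                            value = MAX_CHANNELS(sad->channels) |
                                    DESCRIPTOR_BYTE_2(sad->byte2) |
                                    SUPPORTED_FREQUENCIES(sad->freq);
                            max_channels = sad->channels;
                    }
            }
            return value;
    }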
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index b406546..0318230 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -1301,36 +1301,6 @@
}
/**
- * radeon_irq_kms_pflip_irq_get - pre-pageflip callback.
- *
- * @rdev: radeon_device pointer
- * @crtc: crtc to prepare for pageflip on
- *
- * Pre-pageflip callback (evergreen+).
- * Enables the pageflip irq (vblank irq).
- */
-void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc)
-{
- /* enable the pflip int */
- radeon_irq_kms_pflip_irq_get(rdev, crtc);
-}
-
-/**
- * evergreen_post_page_flip - pos-pageflip callback.
- *
- * @rdev: radeon_device pointer
- * @crtc: crtc to cleanup pageflip on
- *
- * Post-pageflip callback (evergreen+).
- * Disables the pageflip irq (vblank irq).
- */
-void evergreen_post_page_flip(struct radeon_device *rdev, int crtc)
-{
- /* disable the pflip int */
- radeon_irq_kms_pflip_irq_put(rdev, crtc);
-}
-
-/**
* evergreen_page_flip - pageflip callback.
*
* @rdev: radeon_device pointer
@@ -1343,7 +1313,7 @@
* double buffered update to take place.
* Returns the current update pending status.
*/
-u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
+void evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
u32 tmp = RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset);
@@ -1375,9 +1345,23 @@
/* Unlock the lock, so double-buffering can take place inside vblank */
tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK;
WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
+}
+
+/**
+ * evergreen_page_flip_pending - check if page flip is still pending
+ *
+ * @rdev: radeon_device pointer
+ * @crtc_id: crtc to check
+ *
+ * Returns the current update pending status.
+ */
+bool evergreen_page_flip_pending(struct radeon_device *rdev, int crtc_id)
+{
+ struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
/* Return current update_pending status: */
- return RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING;
+ return !!(RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) &
+ EVERGREEN_GRPH_SURFACE_UPDATE_PENDING);
}
/* get temperature in millidegrees */
@@ -4805,7 +4789,7 @@
wake_up(&rdev->irq.vblank_queue);
}
if (atomic_read(&rdev->irq.pflip[0]))
- radeon_crtc_handle_flip(rdev, 0);
+ radeon_crtc_handle_vblank(rdev, 0);
rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D1 vblank\n");
}
@@ -4831,7 +4815,7 @@
wake_up(&rdev->irq.vblank_queue);
}
if (atomic_read(&rdev->irq.pflip[1]))
- radeon_crtc_handle_flip(rdev, 1);
+ radeon_crtc_handle_vblank(rdev, 1);
rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D2 vblank\n");
}
@@ -4857,7 +4841,7 @@
wake_up(&rdev->irq.vblank_queue);
}
if (atomic_read(&rdev->irq.pflip[2]))
- radeon_crtc_handle_flip(rdev, 2);
+ radeon_crtc_handle_vblank(rdev, 2);
rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D3 vblank\n");
}
@@ -4883,7 +4867,7 @@
wake_up(&rdev->irq.vblank_queue);
}
if (atomic_read(&rdev->irq.pflip[3]))
- radeon_crtc_handle_flip(rdev, 3);
+ radeon_crtc_handle_vblank(rdev, 3);
rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D4 vblank\n");
}
@@ -4909,7 +4893,7 @@
wake_up(&rdev->irq.vblank_queue);
}
if (atomic_read(&rdev->irq.pflip[4]))
- radeon_crtc_handle_flip(rdev, 4);
+ radeon_crtc_handle_vblank(rdev, 4);
rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D5 vblank\n");
}
@@ -4935,7 +4919,7 @@
wake_up(&rdev->irq.vblank_queue);
}
if (atomic_read(&rdev->irq.pflip[5]))
- radeon_crtc_handle_flip(rdev, 5);
+ radeon_crtc_handle_vblank(rdev, 5);
rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D6 vblank\n");
}
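
The refactor above separates arming a page flip from checking its completion, which is what lets the radeon_flip_work_func()/radeon_crtc_handle_vblank() pair later in this series drive flips from a worker instead of doing everything in the IRQ handler. The intended call pattern is roughly (a sketch of the dispatch macros, not literal driver code):

    /* arm the flip: program the new scanout base via mmio */
    radeon_page_flip(rdev, crtc_id, new_crtc_base);

    /* from the vblank interrupt for this crtc: */
    if (!radeon_page_flip_pending(rdev, crtc_id))
            radeon_crtc_handle_flip(rdev, crtc_id); /* completed: send event, unpin */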
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index 05b0c95..1ec0e6e 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -293,10 +293,13 @@
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
struct hdmi_avi_infoframe frame;
uint32_t offset;
ssize_t err;
+ uint32_t val;
+ int bpc = 8;
if (!dig || !dig->afmt)
return;
@@ -306,6 +309,12 @@
return;
offset = dig->afmt->offset;
+ /* hdmi deep color mode general control packets setup, if bpc > 8 */
+ if (encoder->crtc) {
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+ bpc = radeon_crtc->bpc;
+ }
+
/* disable audio prior to setting up hw */
if (ASIC_IS_DCE6(rdev)) {
dig->afmt->pin = dce6_audio_get_pin(rdev);
@@ -322,6 +331,35 @@
WREG32(AFMT_AUDIO_CRC_CONTROL + offset, 0x1000);
+ val = RREG32(HDMI_CONTROL + offset);
+ val &= ~HDMI_DEEP_COLOR_ENABLE;
+ val &= ~HDMI_DEEP_COLOR_DEPTH_MASK;
+
+ switch (bpc) {
+ case 0:
+ case 6:
+ case 8:
+ case 16:
+ default:
+ DRM_DEBUG("%s: Disabling hdmi deep color for %d bpc.\n",
+ connector->name, bpc);
+ break;
+ case 10:
+ val |= HDMI_DEEP_COLOR_ENABLE;
+ val |= HDMI_DEEP_COLOR_DEPTH(HDMI_30BIT_DEEP_COLOR);
+ DRM_DEBUG("%s: Enabling hdmi deep color 30 for 10 bpc.\n",
+ connector->name);
+ break;
+ case 12:
+ val |= HDMI_DEEP_COLOR_ENABLE;
+ val |= HDMI_DEEP_COLOR_DEPTH(HDMI_36BIT_DEEP_COLOR);
+ DRM_DEBUG("%s: Enabling hdmi deep color 36 for 12 bpc.\n",
+ connector->name);
+ break;
+ }
+
+ WREG32(HDMI_CONTROL + offset, val);
+
WREG32(HDMI_VBI_PACKET_CONTROL + offset,
HDMI_NULL_SEND | /* send null packets when required */
HDMI_GC_SEND | /* send general control packets */
@@ -348,9 +386,13 @@
/* fglrx clears sth in AFMT_AUDIO_PACKET_CONTROL2 here */
- WREG32(HDMI_ACR_PACKET_CONTROL + offset,
- HDMI_ACR_SOURCE | /* select SW CTS value */
- HDMI_ACR_AUTO_SEND); /* allow hw to sent ACR packets when required */
+ if (bpc > 8)
+ WREG32(HDMI_ACR_PACKET_CONTROL + offset,
+ HDMI_ACR_AUTO_SEND); /* allow hw to sent ACR packets when required */
+ else
+ WREG32(HDMI_ACR_PACKET_CONTROL + offset,
+ HDMI_ACR_SOURCE | /* select SW CTS value */
+ HDMI_ACR_AUTO_SEND); /* allow hw to sent ACR packets when required */
evergreen_hdmi_update_ACR(encoder, mode->clock);
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index f9c7963..b066d67 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -517,10 +517,11 @@
# define HDMI_ERROR_ACK (1 << 8)
# define HDMI_ERROR_MASK (1 << 9)
# define HDMI_DEEP_COLOR_ENABLE (1 << 24)
-# define HDMI_DEEP_COLOR_DEPTH (((x) & 3) << 28)
+# define HDMI_DEEP_COLOR_DEPTH(x) (((x) & 3) << 28)
# define HDMI_24BIT_DEEP_COLOR 0
# define HDMI_30BIT_DEEP_COLOR 1
# define HDMI_36BIT_DEEP_COLOR 2
+# define HDMI_DEEP_COLOR_DEPTH_MASK (3 << 28)
#define HDMI_STATUS 0x7034
# define HDMI_ACTIVE_AVMUTE (1 << 0)
# define HDMI_AUDIO_PACKET_ERROR (1 << 16)
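
Worth noting: the old HDMI_DEEP_COLOR_DEPTH define took no argument, so the `(x)` in its body could never expand correctly; the fixed function-like macro plus the new mask enable the usual clear-then-set idiom used by evergreen_hdmi.c above:

    val = RREG32(HDMI_CONTROL + offset);
    val &= ~(HDMI_DEEP_COLOR_ENABLE | HDMI_DEEP_COLOR_DEPTH_MASK);
    if (bpc == 10)  /* 30-bit deep color */
            val |= HDMI_DEEP_COLOR_ENABLE |
                   HDMI_DEEP_COLOR_DEPTH(HDMI_30BIT_DEEP_COLOR);
    WREG32(HDMI_CONTROL + offset, val);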
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index d246e04..1d3209f 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -1228,12 +1228,14 @@
SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
/* Setup L2 cache */
WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
+ ENABLE_L2_FRAGMENT_PROCESSING |
ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
EFFECTIVE_L2_QUEUE_SIZE(7) |
CONTEXT1_IDENTITY_ACCESS_MODE(1));
WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
+ BANK_SELECT(6) |
L2_CACHE_BIGK_FRAGMENT_SIZE(6));
/* setup context0 */
WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
@@ -1266,6 +1268,7 @@
(u32)(rdev->dummy_page.addr >> 12));
WREG32(VM_CONTEXT1_CNTL2, 4);
WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
+ PAGE_TABLE_BLOCK_SIZE(RADEON_VM_BLOCK_SIZE - 9) |
RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
index d996033..2e12e4d 100644
--- a/drivers/gpu/drm/radeon/nid.h
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -128,6 +128,7 @@
#define READ_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 16)
#define WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 18)
#define WRITE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 19)
+#define PAGE_TABLE_BLOCK_SIZE(x) (((x) & 0xF) << 24)
#define VM_CONTEXT1_CNTL 0x1414
#define VM_CONTEXT0_CNTL2 0x1430
#define VM_CONTEXT1_CNTL2 0x1434
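
PAGE_TABLE_BLOCK_SIZE() follows the same shift-and-mask convention as the neighbouring field macros (value masked to 4 bits, placed at bits 27:24). The ni.c hunk above uses it like this (sketch with the unrelated fault-enable bits omitted; RADEON_VM_BLOCK_SIZE is the driver's log2 of the address range covered by one page table, in pages):

    u32 cntl = ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
               PAGE_TABLE_BLOCK_SIZE(RADEON_VM_BLOCK_SIZE - 9);
    WREG32(VM_CONTEXT1_CNTL, cntl);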
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index b6c3264..ad99813 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -142,36 +142,6 @@
}
/**
- * r100_pre_page_flip - pre-pageflip callback.
- *
- * @rdev: radeon_device pointer
- * @crtc: crtc to prepare for pageflip on
- *
- * Pre-pageflip callback (r1xx-r4xx).
- * Enables the pageflip irq (vblank irq).
- */
-void r100_pre_page_flip(struct radeon_device *rdev, int crtc)
-{
- /* enable the pflip int */
- radeon_irq_kms_pflip_irq_get(rdev, crtc);
-}
-
-/**
- * r100_post_page_flip - pos-pageflip callback.
- *
- * @rdev: radeon_device pointer
- * @crtc: crtc to cleanup pageflip on
- *
- * Post-pageflip callback (r1xx-r4xx).
- * Disables the pageflip irq (vblank irq).
- */
-void r100_post_page_flip(struct radeon_device *rdev, int crtc)
-{
- /* disable the pflip int */
- radeon_irq_kms_pflip_irq_put(rdev, crtc);
-}
-
-/**
* r100_page_flip - pageflip callback.
*
* @rdev: radeon_device pointer
@@ -182,9 +152,8 @@
* During vblank we take the crtc lock and wait for the update_pending
* bit to go high, when it does, we release the lock, and allow the
* double buffered update to take place.
- * Returns the current update pending status.
*/
-u32 r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
+void r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
u32 tmp = ((u32)crtc_base) | RADEON_CRTC_OFFSET__OFFSET_LOCK;
@@ -206,8 +175,24 @@
tmp &= ~RADEON_CRTC_OFFSET__OFFSET_LOCK;
WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);
+}
+
+/**
+ * r100_page_flip_pending - check if page flip is still pending
+ *
+ * @rdev: radeon_device pointer
+ * @crtc_id: crtc to check
+ *
+ * Check if the last pageflip is still pending (r1xx-r4xx).
+ * Returns the current update pending status.
+ */
+bool r100_page_flip_pending(struct radeon_device *rdev, int crtc_id)
+{
+ struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
+
/* Return current update_pending status: */
- return RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET;
+ return !!(RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) &
+ RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET);
}
/**
@@ -794,7 +779,7 @@
wake_up(&rdev->irq.vblank_queue);
}
if (atomic_read(&rdev->irq.pflip[0]))
- radeon_crtc_handle_flip(rdev, 0);
+ radeon_crtc_handle_vblank(rdev, 0);
}
if (status & RADEON_CRTC2_VBLANK_STAT) {
if (rdev->irq.crtc_vblank_int[1]) {
@@ -803,7 +788,7 @@
wake_up(&rdev->irq.vblank_queue);
}
if (atomic_read(&rdev->irq.pflip[1]))
- radeon_crtc_handle_flip(rdev, 1);
+ radeon_crtc_handle_vblank(rdev, 1);
}
if (status & RADEON_FP_DETECT_STAT) {
queue_hotplug = true;
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 6e887d0..436e550 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -3876,7 +3876,7 @@
wake_up(&rdev->irq.vblank_queue);
}
if (atomic_read(&rdev->irq.pflip[0]))
- radeon_crtc_handle_flip(rdev, 0);
+ radeon_crtc_handle_vblank(rdev, 0);
rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D1 vblank\n");
}
@@ -3902,7 +3902,7 @@
wake_up(&rdev->irq.vblank_queue);
}
if (atomic_read(&rdev->irq.pflip[1]))
- radeon_crtc_handle_flip(rdev, 1);
+ radeon_crtc_handle_vblank(rdev, 1);
rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D2 vblank\n");
}
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index 85a2bb2..26ef8ce 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -133,7 +133,7 @@
/*
* update the N and CTS parameters for a given pixel clock rate
*/
-static void r600_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t clock)
+void r600_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t clock)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
@@ -142,21 +142,33 @@
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
uint32_t offset = dig->afmt->offset;
- WREG32(HDMI0_ACR_32_0 + offset, HDMI0_ACR_CTS_32(acr.cts_32khz));
- WREG32(HDMI0_ACR_32_1 + offset, acr.n_32khz);
+ WREG32_P(HDMI0_ACR_32_0 + offset,
+ HDMI0_ACR_CTS_32(acr.cts_32khz),
+ ~HDMI0_ACR_CTS_32_MASK);
+ WREG32_P(HDMI0_ACR_32_1 + offset,
+ HDMI0_ACR_N_32(acr.n_32khz),
+ ~HDMI0_ACR_N_32_MASK);
- WREG32(HDMI0_ACR_44_0 + offset, HDMI0_ACR_CTS_44(acr.cts_44_1khz));
- WREG32(HDMI0_ACR_44_1 + offset, acr.n_44_1khz);
+ WREG32_P(HDMI0_ACR_44_0 + offset,
+ HDMI0_ACR_CTS_44(acr.cts_44_1khz),
+ ~HDMI0_ACR_CTS_44_MASK);
+ WREG32_P(HDMI0_ACR_44_1 + offset,
+ HDMI0_ACR_N_44(acr.n_44_1khz),
+ ~HDMI0_ACR_N_44_MASK);
- WREG32(HDMI0_ACR_48_0 + offset, HDMI0_ACR_CTS_48(acr.cts_48khz));
- WREG32(HDMI0_ACR_48_1 + offset, acr.n_48khz);
+ WREG32_P(HDMI0_ACR_48_0 + offset,
+ HDMI0_ACR_CTS_48(acr.cts_48khz),
+ ~HDMI0_ACR_CTS_48_MASK);
+ WREG32_P(HDMI0_ACR_48_1 + offset,
+ HDMI0_ACR_N_48(acr.n_48khz),
+ ~HDMI0_ACR_N_48_MASK);
}
/*
* build a HDMI Video Info Frame
*/
-static void r600_hdmi_update_avi_infoframe(struct drm_encoder *encoder,
- void *buffer, size_t size)
+void r600_hdmi_update_avi_infoframe(struct drm_encoder *encoder, void *buffer,
+ size_t size)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
@@ -231,7 +243,7 @@
/*
* write the audio workaround status to the hardware
*/
-static void r600_hdmi_audio_workaround(struct drm_encoder *encoder)
+void r600_hdmi_audio_workaround(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
@@ -250,7 +262,7 @@
value, ~HDMI0_AUDIO_TEST_EN);
}
-static void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
+void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
@@ -320,121 +332,6 @@
}
}
-static void dce3_2_afmt_write_speaker_allocation(struct drm_encoder *encoder)
-{
- struct radeon_device *rdev = encoder->dev->dev_private;
- struct drm_connector *connector;
- struct radeon_connector *radeon_connector = NULL;
- u32 tmp;
- u8 *sadb;
- int sad_count;
-
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
- if (connector->encoder == encoder) {
- radeon_connector = to_radeon_connector(connector);
- break;
- }
- }
-
- if (!radeon_connector) {
- DRM_ERROR("Couldn't find encoder's connector\n");
- return;
- }
-
- sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb);
- if (sad_count < 0) {
- DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
- return;
- }
-
- /* program the speaker allocation */
- tmp = RREG32(AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER);
- tmp &= ~(DP_CONNECTION | SPEAKER_ALLOCATION_MASK);
- /* set HDMI mode */
- tmp |= HDMI_CONNECTION;
- if (sad_count)
- tmp |= SPEAKER_ALLOCATION(sadb[0]);
- else
- tmp |= SPEAKER_ALLOCATION(5); /* stereo */
- WREG32(AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER, tmp);
-
- kfree(sadb);
-}
-
-static void dce3_2_afmt_write_sad_regs(struct drm_encoder *encoder)
-{
- struct radeon_device *rdev = encoder->dev->dev_private;
- struct drm_connector *connector;
- struct radeon_connector *radeon_connector = NULL;
- struct cea_sad *sads;
- int i, sad_count;
-
- static const u16 eld_reg_to_type[][2] = {
- { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
- { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
- { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
- { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
- { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
- { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
- { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
- { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
- { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
- { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
- { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
- { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
- };
-
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
- if (connector->encoder == encoder) {
- radeon_connector = to_radeon_connector(connector);
- break;
- }
- }
-
- if (!radeon_connector) {
- DRM_ERROR("Couldn't find encoder's connector\n");
- return;
- }
-
- sad_count = drm_edid_to_sad(radeon_connector->edid, &sads);
- if (sad_count < 0) {
- DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
- return;
- }
- BUG_ON(!sads);
-
- for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
- u32 value = 0;
- u8 stereo_freqs = 0;
- int max_channels = -1;
- int j;
-
- for (j = 0; j < sad_count; j++) {
- struct cea_sad *sad = &sads[j];
-
- if (sad->format == eld_reg_to_type[i][1]) {
- if (sad->channels > max_channels) {
- value = MAX_CHANNELS(sad->channels) |
- DESCRIPTOR_BYTE_2(sad->byte2) |
- SUPPORTED_FREQUENCIES(sad->freq);
- max_channels = sad->channels;
- }
-
- if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
- stereo_freqs |= sad->freq;
- else
- break;
- }
- }
-
- value |= SUPPORTED_FREQUENCIES_STEREO(stereo_freqs);
-
- WREG32(eld_reg_to_type[i][0], value);
- }
-
- kfree(sads);
-}
-
/*
* update the info frames with the data from the current display mode
*/
@@ -447,6 +344,7 @@
u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
struct hdmi_avi_infoframe frame;
uint32_t offset;
+ uint32_t acr_ctl;
ssize_t err;
if (!dig || !dig->afmt)
@@ -463,52 +361,44 @@
r600_audio_set_dto(encoder, mode->clock);
- WREG32(HDMI0_VBI_PACKET_CONTROL + offset,
- HDMI0_NULL_SEND); /* send null packets when required */
+ WREG32_P(HDMI0_AUDIO_PACKET_CONTROL + offset,
+ HDMI0_AUDIO_SAMPLE_SEND | /* send audio packets */
+ HDMI0_AUDIO_DELAY_EN(1) | /* default audio delay */
+ HDMI0_AUDIO_PACKETS_PER_LINE(3) | /* should be sufficient for all audio modes and small enough for all hblanks */
+ HDMI0_60958_CS_UPDATE, /* allow 60958 channel status fields to be updated */
+ ~(HDMI0_AUDIO_SAMPLE_SEND |
+ HDMI0_AUDIO_DELAY_EN_MASK |
+ HDMI0_AUDIO_PACKETS_PER_LINE_MASK |
+ HDMI0_60958_CS_UPDATE));
- WREG32(HDMI0_AUDIO_CRC_CONTROL + offset, 0x1000);
+ /* DCE 3.0 uses register that's normally for CRC_CONTROL */
+ acr_ctl = ASIC_IS_DCE3(rdev) ? DCE3_HDMI0_ACR_PACKET_CONTROL :
+ HDMI0_ACR_PACKET_CONTROL;
+ WREG32_P(acr_ctl + offset,
+ HDMI0_ACR_SOURCE | /* select SW CTS value - XXX verify that hw CTS works on all families */
+ HDMI0_ACR_AUTO_SEND, /* allow hw to send ACR packets when required */
+ ~(HDMI0_ACR_SOURCE |
+ HDMI0_ACR_AUTO_SEND));
- if (ASIC_IS_DCE32(rdev)) {
- WREG32(HDMI0_AUDIO_PACKET_CONTROL + offset,
- HDMI0_AUDIO_DELAY_EN(1) | /* default audio delay */
- HDMI0_AUDIO_PACKETS_PER_LINE(3)); /* should be suffient for all audio modes and small enough for all hblanks */
- WREG32(AFMT_AUDIO_PACKET_CONTROL + offset,
- AFMT_AUDIO_SAMPLE_SEND | /* send audio packets */
- AFMT_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */
- } else {
- WREG32(HDMI0_AUDIO_PACKET_CONTROL + offset,
- HDMI0_AUDIO_SAMPLE_SEND | /* send audio packets */
- HDMI0_AUDIO_DELAY_EN(1) | /* default audio delay */
- HDMI0_AUDIO_PACKETS_PER_LINE(3) | /* should be suffient for all audio modes and small enough for all hblanks */
- HDMI0_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */
- }
+ WREG32_OR(HDMI0_VBI_PACKET_CONTROL + offset,
+ HDMI0_NULL_SEND | /* send null packets when required */
+ HDMI0_GC_SEND | /* send general control packets */
+ HDMI0_GC_CONT); /* send general control packets every frame */
- if (ASIC_IS_DCE32(rdev)) {
- dce3_2_afmt_write_speaker_allocation(encoder);
- dce3_2_afmt_write_sad_regs(encoder);
- }
+ WREG32_OR(HDMI0_INFOFRAME_CONTROL0 + offset,
+ HDMI0_AVI_INFO_SEND | /* enable AVI info frames */
+ HDMI0_AVI_INFO_CONT | /* send AVI info frames every frame/field */
+ HDMI0_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */
+ HDMI0_AUDIO_INFO_UPDATE); /* required for audio info values to be updated */
- WREG32(HDMI0_ACR_PACKET_CONTROL + offset,
- HDMI0_ACR_SOURCE | /* select SW CTS value - XXX verify that hw CTS works on all families */
- HDMI0_ACR_AUTO_SEND); /* allow hw to sent ACR packets when required */
+ WREG32_P(HDMI0_INFOFRAME_CONTROL1 + offset,
+ HDMI0_AVI_INFO_LINE(2) | /* anything other than 0 */
+ HDMI0_AUDIO_INFO_LINE(2), /* anything other than 0 */
+ ~(HDMI0_AVI_INFO_LINE_MASK |
+ HDMI0_AUDIO_INFO_LINE_MASK));
- WREG32(HDMI0_VBI_PACKET_CONTROL + offset,
- HDMI0_NULL_SEND | /* send null packets when required */
- HDMI0_GC_SEND | /* send general control packets */
- HDMI0_GC_CONT); /* send general control packets every frame */
-
- /* TODO: HDMI0_AUDIO_INFO_UPDATE */
- WREG32(HDMI0_INFOFRAME_CONTROL0 + offset,
- HDMI0_AVI_INFO_SEND | /* enable AVI info frames */
- HDMI0_AVI_INFO_CONT | /* send AVI info frames every frame/field */
- HDMI0_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */
- HDMI0_AUDIO_INFO_CONT); /* send audio info frames every frame/field */
-
- WREG32(HDMI0_INFOFRAME_CONTROL1 + offset,
- HDMI0_AVI_INFO_LINE(2) | /* anything other than 0 */
- HDMI0_AUDIO_INFO_LINE(2)); /* anything other than 0 */
-
- WREG32(HDMI0_GC + offset, 0); /* unset HDMI0_GC_AVMUTE */
+ WREG32_AND(HDMI0_GC + offset,
+ ~HDMI0_GC_AVMUTE); /* unset HDMI0_GC_AVMUTE */
err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
if (err < 0) {
@@ -523,22 +413,45 @@
}
r600_hdmi_update_avi_infoframe(encoder, buffer, sizeof(buffer));
+
+ /* fglrx duplicates INFOFRAME_CONTROL0 & INFOFRAME_CONTROL1 ops here */
+
+ WREG32_AND(HDMI0_GENERIC_PACKET_CONTROL + offset,
+ ~(HDMI0_GENERIC0_SEND |
+ HDMI0_GENERIC0_CONT |
+ HDMI0_GENERIC0_UPDATE |
+ HDMI0_GENERIC1_SEND |
+ HDMI0_GENERIC1_CONT |
+ HDMI0_GENERIC0_LINE_MASK |
+ HDMI0_GENERIC1_LINE_MASK));
+
r600_hdmi_update_ACR(encoder, mode->clock);
+ WREG32_P(HDMI0_60958_0 + offset,
+ HDMI0_60958_CS_CHANNEL_NUMBER_L(1),
+ ~(HDMI0_60958_CS_CHANNEL_NUMBER_L_MASK |
+ HDMI0_60958_CS_CLOCK_ACCURACY_MASK));
+
+ WREG32_P(HDMI0_60958_1 + offset,
+ HDMI0_60958_CS_CHANNEL_NUMBER_R(2),
+ ~HDMI0_60958_CS_CHANNEL_NUMBER_R_MASK);
+
/* it's unknown what these bits do exactly, but they're indeed quite useful for debugging */
WREG32(HDMI0_RAMP_CONTROL0 + offset, 0x00FFFFFF);
WREG32(HDMI0_RAMP_CONTROL1 + offset, 0x007FFFFF);
WREG32(HDMI0_RAMP_CONTROL2 + offset, 0x00000001);
WREG32(HDMI0_RAMP_CONTROL3 + offset, 0x00000001);
- r600_hdmi_audio_workaround(encoder);
-
/* enable audio after setting up hw */
r600_audio_enable(rdev, dig->afmt->pin, true);
}
-/*
- * update settings with current parameters from audio engine
+/**
+ * r600_hdmi_update_audio_settings - Update audio infoframe
+ *
+ * @encoder: drm encoder
+ *
+ * Gets info about current audio stream and updates audio infoframe.
*/
void r600_hdmi_update_audio_settings(struct drm_encoder *encoder)
{
@@ -550,7 +463,7 @@
uint8_t buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AUDIO_INFOFRAME_SIZE];
struct hdmi_audio_infoframe frame;
uint32_t offset;
- uint32_t iec;
+ uint32_t value;
ssize_t err;
if (!dig->afmt || !dig->afmt->enabled)
@@ -563,60 +476,6 @@
DRM_DEBUG("0x%02X IEC60958 status bits and 0x%02X category code\n",
(int)audio.status_bits, (int)audio.category_code);
- iec = 0;
- if (audio.status_bits & AUDIO_STATUS_PROFESSIONAL)
- iec |= 1 << 0;
- if (audio.status_bits & AUDIO_STATUS_NONAUDIO)
- iec |= 1 << 1;
- if (audio.status_bits & AUDIO_STATUS_COPYRIGHT)
- iec |= 1 << 2;
- if (audio.status_bits & AUDIO_STATUS_EMPHASIS)
- iec |= 1 << 3;
-
- iec |= HDMI0_60958_CS_CATEGORY_CODE(audio.category_code);
-
- switch (audio.rate) {
- case 32000:
- iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0x3);
- break;
- case 44100:
- iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0x0);
- break;
- case 48000:
- iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0x2);
- break;
- case 88200:
- iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0x8);
- break;
- case 96000:
- iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0xa);
- break;
- case 176400:
- iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0xc);
- break;
- case 192000:
- iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0xe);
- break;
- }
-
- WREG32(HDMI0_60958_0 + offset, iec);
-
- iec = 0;
- switch (audio.bits_per_sample) {
- case 16:
- iec |= HDMI0_60958_CS_WORD_LENGTH(0x2);
- break;
- case 20:
- iec |= HDMI0_60958_CS_WORD_LENGTH(0x3);
- break;
- case 24:
- iec |= HDMI0_60958_CS_WORD_LENGTH(0xb);
- break;
- }
- if (audio.status_bits & AUDIO_STATUS_V)
- iec |= 0x5 << 16;
- WREG32_P(HDMI0_60958_1 + offset, iec, ~0x5000f);
-
err = hdmi_audio_infoframe_init(&frame);
if (err < 0) {
DRM_ERROR("failed to setup audio infoframe\n");
@@ -631,8 +490,22 @@
return;
}
+ value = RREG32(HDMI0_AUDIO_PACKET_CONTROL + offset);
+ if (value & HDMI0_AUDIO_TEST_EN)
+ WREG32(HDMI0_AUDIO_PACKET_CONTROL + offset,
+ value & ~HDMI0_AUDIO_TEST_EN);
+
+ WREG32_OR(HDMI0_CONTROL + offset,
+ HDMI0_ERROR_ACK);
+
+ WREG32_AND(HDMI0_INFOFRAME_CONTROL0 + offset,
+ ~HDMI0_AUDIO_INFO_SOURCE);
+
r600_hdmi_update_audio_infoframe(encoder, buffer, sizeof(buffer));
- r600_hdmi_audio_workaround(encoder);
+
+ WREG32_OR(HDMI0_INFOFRAME_CONTROL0 + offset,
+ HDMI0_AUDIO_INFO_CONT |
+ HDMI0_AUDIO_INFO_UPDATE);
}
/*
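
The r600_hdmi.c conversion leans throughout on radeon's masked-write helpers. Their effect (paraphrasing the radeon.h definitions) is a read-modify-write that keeps the bits selected by `mask` and merges `val` into the rest, which is why every call site passes the complement of the field masks it wants to update:

    /* WREG32_P(reg, val, mask), approximately: */
    tmp  = RREG32(reg);
    tmp &= mask;            /* keep these bits */
    tmp |= (val & ~mask);   /* merge the new field values */
    WREG32(reg, tmp);

    /* WREG32_OR(reg, or)   == WREG32_P(reg, or, ~(or))
     * WREG32_AND(reg, and) == WREG32_P(reg, 0, and)
     */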
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 37455f6..f94e7a9 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -1029,15 +1029,18 @@
#define HDMI0_AUDIO_PACKET_CONTROL 0x7408
# define HDMI0_AUDIO_SAMPLE_SEND (1 << 0)
# define HDMI0_AUDIO_DELAY_EN(x) (((x) & 3) << 4)
+# define HDMI0_AUDIO_DELAY_EN_MASK (3 << 4)
# define HDMI0_AUDIO_SEND_MAX_PACKETS (1 << 8)
# define HDMI0_AUDIO_TEST_EN (1 << 12)
# define HDMI0_AUDIO_PACKETS_PER_LINE(x) (((x) & 0x1f) << 16)
+# define HDMI0_AUDIO_PACKETS_PER_LINE_MASK (0x1f << 16)
# define HDMI0_AUDIO_CHANNEL_SWAP (1 << 24)
# define HDMI0_60958_CS_UPDATE (1 << 26)
# define HDMI0_AZ_FORMAT_WTRIG_MASK (1 << 28)
# define HDMI0_AZ_FORMAT_WTRIG_ACK (1 << 29)
#define HDMI0_AUDIO_CRC_CONTROL 0x740c
# define HDMI0_AUDIO_CRC_EN (1 << 0)
+#define DCE3_HDMI0_ACR_PACKET_CONTROL 0x740c
#define HDMI0_VBI_PACKET_CONTROL 0x7410
# define HDMI0_NULL_SEND (1 << 0)
# define HDMI0_GC_SEND (1 << 4)
@@ -1054,7 +1057,9 @@
# define HDMI0_MPEG_INFO_UPDATE (1 << 10)
#define HDMI0_INFOFRAME_CONTROL1 0x7418
# define HDMI0_AVI_INFO_LINE(x) (((x) & 0x3f) << 0)
+# define HDMI0_AVI_INFO_LINE_MASK (0x3f << 0)
# define HDMI0_AUDIO_INFO_LINE(x) (((x) & 0x3f) << 8)
+# define HDMI0_AUDIO_INFO_LINE_MASK (0x3f << 8)
# define HDMI0_MPEG_INFO_LINE(x) (((x) & 0x3f) << 16)
#define HDMI0_GENERIC_PACKET_CONTROL 0x741c
# define HDMI0_GENERIC0_SEND (1 << 0)
@@ -1063,7 +1068,9 @@
# define HDMI0_GENERIC1_SEND (1 << 4)
# define HDMI0_GENERIC1_CONT (1 << 5)
# define HDMI0_GENERIC0_LINE(x) (((x) & 0x3f) << 16)
+# define HDMI0_GENERIC0_LINE_MASK (0x3f << 16)
# define HDMI0_GENERIC1_LINE(x) (((x) & 0x3f) << 24)
+# define HDMI0_GENERIC1_LINE_MASK (0x3f << 24)
#define HDMI0_GC 0x7428
# define HDMI0_GC_AVMUTE (1 << 0)
#define HDMI0_AVI_INFO0 0x7454
@@ -1119,16 +1126,22 @@
#define HDMI0_GENERIC1_6 0x74a8
#define HDMI0_ACR_32_0 0x74ac
# define HDMI0_ACR_CTS_32(x) (((x) & 0xfffff) << 12)
+# define HDMI0_ACR_CTS_32_MASK (0xfffff << 12)
#define HDMI0_ACR_32_1 0x74b0
# define HDMI0_ACR_N_32(x) (((x) & 0xfffff) << 0)
+# define HDMI0_ACR_N_32_MASK (0xfffff << 0)
#define HDMI0_ACR_44_0 0x74b4
# define HDMI0_ACR_CTS_44(x) (((x) & 0xfffff) << 12)
+# define HDMI0_ACR_CTS_44_MASK (0xfffff << 12)
#define HDMI0_ACR_44_1 0x74b8
# define HDMI0_ACR_N_44(x) (((x) & 0xfffff) << 0)
+# define HDMI0_ACR_N_44_MASK (0xfffff << 0)
#define HDMI0_ACR_48_0 0x74bc
# define HDMI0_ACR_CTS_48(x) (((x) & 0xfffff) << 12)
+# define HDMI0_ACR_CTS_48_MASK (0xfffff << 12)
#define HDMI0_ACR_48_1 0x74c0
# define HDMI0_ACR_N_48(x) (((x) & 0xfffff) << 0)
+# define HDMI0_ACR_N_48_MASK (0xfffff << 0)
#define HDMI0_ACR_STATUS_0 0x74c4
#define HDMI0_ACR_STATUS_1 0x74c8
#define HDMI0_AUDIO_INFO0 0x74cc
@@ -1148,14 +1161,17 @@
# define HDMI0_60958_CS_CATEGORY_CODE(x) (((x) & 0xff) << 8)
# define HDMI0_60958_CS_SOURCE_NUMBER(x) (((x) & 0xf) << 16)
# define HDMI0_60958_CS_CHANNEL_NUMBER_L(x) (((x) & 0xf) << 20)
+# define HDMI0_60958_CS_CHANNEL_NUMBER_L_MASK (0xf << 20)
# define HDMI0_60958_CS_SAMPLING_FREQUENCY(x) (((x) & 0xf) << 24)
# define HDMI0_60958_CS_CLOCK_ACCURACY(x) (((x) & 3) << 28)
+# define HDMI0_60958_CS_CLOCK_ACCURACY_MASK (3 << 28)
#define HDMI0_60958_1 0x74d8
# define HDMI0_60958_CS_WORD_LENGTH(x) (((x) & 0xf) << 0)
# define HDMI0_60958_CS_ORIGINAL_SAMPLING_FREQUENCY(x) (((x) & 0xf) << 4)
# define HDMI0_60958_CS_VALID_L(x) (((x) & 1) << 16)
# define HDMI0_60958_CS_VALID_R(x) (((x) & 1) << 18)
# define HDMI0_60958_CS_CHANNEL_NUMBER_R(x) (((x) & 0xf) << 20)
+# define HDMI0_60958_CS_CHANNEL_NUMBER_R_MASK (0xf << 20)
#define HDMI0_ACR_PACKET_CONTROL 0x74dc
# define HDMI0_ACR_SEND (1 << 0)
# define HDMI0_ACR_CONT (1 << 1)
@@ -1166,6 +1182,7 @@
# define HDMI0_ACR_48 3
# define HDMI0_ACR_SOURCE (1 << 8) /* 0 - hw; 1 - cts value */
# define HDMI0_ACR_AUTO_SEND (1 << 12)
+#define DCE3_HDMI0_AUDIO_CRC_CONTROL 0x74dc
#define HDMI0_RAMP_CONTROL0 0x74e0
# define HDMI0_RAMP_MAX_COUNT(x) (((x) & 0xffffff) << 0)
#define HDMI0_RAMP_CONTROL1 0x74e4
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index b58e1af..dd4da88 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -676,14 +676,16 @@
* IRQS.
*/
-struct radeon_unpin_work {
- struct work_struct work;
- struct radeon_device *rdev;
- int crtc_id;
- struct radeon_fence *fence;
+struct radeon_flip_work {
+ struct work_struct flip_work;
+ struct work_struct unpin_work;
+ struct radeon_device *rdev;
+ int crtc_id;
+ struct drm_framebuffer *fb;
struct drm_pending_vblank_event *event;
- struct radeon_bo *old_rbo;
- u64 new_crtc_base;
+ struct radeon_bo *old_rbo;
+ struct radeon_bo *new_rbo;
+ struct radeon_fence *fence;
};
struct r500_irq_stat_regs {
@@ -848,6 +850,15 @@
#define R600_PTE_READABLE (1 << 5)
#define R600_PTE_WRITEABLE (1 << 6)
+/* PTE (Page Table Entry) fragment field for different page sizes */
+#define R600_PTE_FRAG_4KB (0 << 7)
+#define R600_PTE_FRAG_64KB (4 << 7)
+#define R600_PTE_FRAG_256KB (6 << 7)
+
+/* flags used for GART page table entries on R600+ */
+#define R600_PTE_GART ( R600_PTE_VALID | R600_PTE_SYSTEM | R600_PTE_SNOOPED \
+ | R600_PTE_READABLE | R600_PTE_WRITEABLE)
+
struct radeon_vm_pt {
struct radeon_bo *bo;
uint64_t addr;
@@ -1876,9 +1887,8 @@
} dpm;
/* pageflipping */
struct {
- void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
- u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
- void (*post_page_flip)(struct radeon_device *rdev, int crtc);
+ void (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
+ bool (*page_flip_pending)(struct radeon_device *rdev, int crtc);
} pflip;
};
@@ -2737,9 +2747,8 @@
#define radeon_pm_finish(rdev) (rdev)->asic->pm.finish((rdev))
#define radeon_pm_init_profile(rdev) (rdev)->asic->pm.init_profile((rdev))
#define radeon_pm_get_dynpm_state(rdev) (rdev)->asic->pm.get_dynpm_state((rdev))
-#define radeon_pre_page_flip(rdev, crtc) (rdev)->asic->pflip.pre_page_flip((rdev), (crtc))
#define radeon_page_flip(rdev, crtc, base) (rdev)->asic->pflip.page_flip((rdev), (crtc), (base))
-#define radeon_post_page_flip(rdev, crtc) (rdev)->asic->pflip.post_page_flip((rdev), (crtc))
+#define radeon_page_flip_pending(rdev, crtc) (rdev)->asic->pflip.page_flip_pending((rdev), (crtc))
#define radeon_wait_for_vblank(rdev, crtc) (rdev)->asic->display.wait_for_vblank((rdev), (crtc))
#define radeon_mc_wait_for_idle(rdev) (rdev)->asic->mc_wait_for_idle((rdev))
#define radeon_get_xclk(rdev) (rdev)->asic->get_xclk((rdev))
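
R600_PTE_GART collects the flags every GART page table entry needs, and the new R600_PTE_FRAG_* values occupy the PTE fragment field that ENABLE_L2_FRAGMENT_PROCESSING (ni.c hunk above) makes the L2 honour. A sketch of how an entry might be composed (illustrative; the exact address encoding lives in the per-ASIC GART code, and the variable names here are hypothetical):

    uint64_t pte = page_addr & ~0xFFFULL;   /* page-aligned system address */

    pte |= R600_PTE_GART;                   /* valid | system | snooped | rw */
    if (range_is_contiguous_64kb)
            pte |= R600_PTE_FRAG_64KB;      /* hint: 64KB physically contiguous */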
diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c
index 4243334..a9297b2 100644
--- a/drivers/gpu/drm/radeon/radeon_agp.c
+++ b/drivers/gpu/drm/radeon/radeon_agp.c
@@ -117,9 +117,6 @@
/* ATI Host Bridge / RV280 [M9+] Needs AGPMode 1 (phoronix forum) */
{ PCI_VENDOR_ID_ATI, 0xcbb2, PCI_VENDOR_ID_ATI, 0x5c61,
PCI_VENDOR_ID_SONY, 0x8175, 1},
- /* HP Host Bridge / R300 [FireGL X1] Needs AGPMode 2 (fdo #7770) */
- { PCI_VENDOR_ID_HP, 0x122e, PCI_VENDOR_ID_ATI, 0x4e47,
- PCI_VENDOR_ID_ATI, 0x0152, 2},
{ 0, 0, 0, 0, 0, 0, 0 },
};
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index b8a24a7..d8e1587 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -248,9 +248,8 @@
.set_clock_gating = &radeon_legacy_set_clock_gating,
},
.pflip = {
- .pre_page_flip = &r100_pre_page_flip,
.page_flip = &r100_page_flip,
- .post_page_flip = &r100_post_page_flip,
+ .page_flip_pending = &r100_page_flip_pending,
},
};
@@ -315,9 +314,8 @@
.set_clock_gating = &radeon_legacy_set_clock_gating,
},
.pflip = {
- .pre_page_flip = &r100_pre_page_flip,
.page_flip = &r100_page_flip,
- .post_page_flip = &r100_post_page_flip,
+ .page_flip_pending = &r100_page_flip_pending,
},
};
@@ -396,9 +394,8 @@
.set_clock_gating = &radeon_legacy_set_clock_gating,
},
.pflip = {
- .pre_page_flip = &r100_pre_page_flip,
.page_flip = &r100_page_flip,
- .post_page_flip = &r100_post_page_flip,
+ .page_flip_pending = &r100_page_flip_pending,
},
};
@@ -463,9 +460,8 @@
.set_clock_gating = &radeon_legacy_set_clock_gating,
},
.pflip = {
- .pre_page_flip = &r100_pre_page_flip,
.page_flip = &r100_page_flip,
- .post_page_flip = &r100_post_page_flip,
+ .page_flip_pending = &r100_page_flip_pending,
},
};
@@ -530,9 +526,8 @@
.set_clock_gating = &radeon_atom_set_clock_gating,
},
.pflip = {
- .pre_page_flip = &r100_pre_page_flip,
.page_flip = &r100_page_flip,
- .post_page_flip = &r100_post_page_flip,
+ .page_flip_pending = &r100_page_flip_pending,
},
};
@@ -597,9 +592,8 @@
.set_clock_gating = &radeon_legacy_set_clock_gating,
},
.pflip = {
- .pre_page_flip = &r100_pre_page_flip,
.page_flip = &r100_page_flip,
- .post_page_flip = &r100_post_page_flip,
+ .page_flip_pending = &r100_page_flip_pending,
},
};
@@ -666,9 +660,8 @@
.set_clock_gating = &radeon_atom_set_clock_gating,
},
.pflip = {
- .pre_page_flip = &rs600_pre_page_flip,
.page_flip = &rs600_page_flip,
- .post_page_flip = &rs600_post_page_flip,
+ .page_flip_pending = &rs600_page_flip_pending,
},
};
@@ -735,9 +728,8 @@
.set_clock_gating = &radeon_atom_set_clock_gating,
},
.pflip = {
- .pre_page_flip = &rs600_pre_page_flip,
.page_flip = &rs600_page_flip,
- .post_page_flip = &rs600_post_page_flip,
+ .page_flip_pending = &rs600_page_flip_pending,
},
};
@@ -802,9 +794,8 @@
.set_clock_gating = &radeon_atom_set_clock_gating,
},
.pflip = {
- .pre_page_flip = &rs600_pre_page_flip,
.page_flip = &rs600_page_flip,
- .post_page_flip = &rs600_post_page_flip,
+ .page_flip_pending = &rs600_page_flip_pending,
},
};
@@ -869,9 +860,8 @@
.set_clock_gating = &radeon_atom_set_clock_gating,
},
.pflip = {
- .pre_page_flip = &rs600_pre_page_flip,
.page_flip = &rs600_page_flip,
- .post_page_flip = &rs600_post_page_flip,
+ .page_flip_pending = &rs600_page_flip_pending,
},
};
@@ -968,9 +958,8 @@
.get_temperature = &rv6xx_get_temp,
},
.pflip = {
- .pre_page_flip = &rs600_pre_page_flip,
.page_flip = &rs600_page_flip,
- .post_page_flip = &rs600_post_page_flip,
+ .page_flip_pending = &rs600_page_flip_pending,
},
};
@@ -1059,9 +1048,8 @@
.force_performance_level = &rv6xx_dpm_force_performance_level,
},
.pflip = {
- .pre_page_flip = &rs600_pre_page_flip,
.page_flip = &rs600_page_flip,
- .post_page_flip = &rs600_post_page_flip,
+ .page_flip_pending = &rs600_page_flip_pending,
},
};
@@ -1150,9 +1138,8 @@
.force_performance_level = &rs780_dpm_force_performance_level,
},
.pflip = {
- .pre_page_flip = &rs600_pre_page_flip,
.page_flip = &rs600_page_flip,
- .post_page_flip = &rs600_post_page_flip,
+ .page_flip_pending = &rs600_page_flip_pending,
},
};
@@ -1201,7 +1188,7 @@
.set_backlight_level = &atombios_set_backlight_level,
.get_backlight_level = &atombios_get_backlight_level,
.hdmi_enable = &r600_hdmi_enable,
- .hdmi_setmode = &r600_hdmi_setmode,
+ .hdmi_setmode = &dce3_1_hdmi_setmode,
},
.copy = {
.blit = &r600_copy_cpdma,
@@ -1256,9 +1243,8 @@
.vblank_too_short = &rv770_dpm_vblank_too_short,
},
.pflip = {
- .pre_page_flip = &rs600_pre_page_flip,
.page_flip = &rv770_page_flip,
- .post_page_flip = &rs600_post_page_flip,
+ .page_flip_pending = &rv770_page_flip_pending,
},
};
@@ -1375,9 +1361,8 @@
.vblank_too_short = &cypress_dpm_vblank_too_short,
},
.pflip = {
- .pre_page_flip = &evergreen_pre_page_flip,
.page_flip = &evergreen_page_flip,
- .post_page_flip = &evergreen_post_page_flip,
+ .page_flip_pending = &evergreen_page_flip_pending,
},
};
@@ -1467,9 +1452,8 @@
.force_performance_level = &sumo_dpm_force_performance_level,
},
.pflip = {
- .pre_page_flip = &evergreen_pre_page_flip,
.page_flip = &evergreen_page_flip,
- .post_page_flip = &evergreen_post_page_flip,
+ .page_flip_pending = &evergreen_page_flip_pending,
},
};
@@ -1560,9 +1544,8 @@
.vblank_too_short = &btc_dpm_vblank_too_short,
},
.pflip = {
- .pre_page_flip = &evergreen_pre_page_flip,
.page_flip = &evergreen_page_flip,
- .post_page_flip = &evergreen_post_page_flip,
+ .page_flip_pending = &evergreen_page_flip_pending,
},
};
@@ -1704,9 +1687,8 @@
.vblank_too_short = &ni_dpm_vblank_too_short,
},
.pflip = {
- .pre_page_flip = &evergreen_pre_page_flip,
.page_flip = &evergreen_page_flip,
- .post_page_flip = &evergreen_post_page_flip,
+ .page_flip_pending = &evergreen_page_flip_pending,
},
};
@@ -1805,9 +1787,8 @@
.enable_bapm = &trinity_dpm_enable_bapm,
},
.pflip = {
- .pre_page_flip = &evergreen_pre_page_flip,
.page_flip = &evergreen_page_flip,
- .post_page_flip = &evergreen_post_page_flip,
+ .page_flip_pending = &evergreen_page_flip_pending,
},
};
@@ -1936,9 +1917,8 @@
.vblank_too_short = &ni_dpm_vblank_too_short,
},
.pflip = {
- .pre_page_flip = &evergreen_pre_page_flip,
.page_flip = &evergreen_page_flip,
- .post_page_flip = &evergreen_post_page_flip,
+ .page_flip_pending = &evergreen_page_flip_pending,
},
};
@@ -2099,9 +2079,8 @@
.powergate_uvd = &ci_dpm_powergate_uvd,
},
.pflip = {
- .pre_page_flip = &evergreen_pre_page_flip,
.page_flip = &evergreen_page_flip,
- .post_page_flip = &evergreen_post_page_flip,
+ .page_flip_pending = &evergreen_page_flip_pending,
},
};
@@ -2204,9 +2183,8 @@
.enable_bapm = &kv_dpm_enable_bapm,
},
.pflip = {
- .pre_page_flip = &evergreen_pre_page_flip,
.page_flip = &evergreen_page_flip,
- .post_page_flip = &evergreen_post_page_flip,
+ .page_flip_pending = &evergreen_page_flip_pending,
},
};
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 3d55a3a..0eab015 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -135,9 +135,9 @@
extern void r100_pm_finish(struct radeon_device *rdev);
extern void r100_pm_init_profile(struct radeon_device *rdev);
extern void r100_pm_get_dynpm_state(struct radeon_device *rdev);
-extern void r100_pre_page_flip(struct radeon_device *rdev, int crtc);
-extern u32 r100_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
-extern void r100_post_page_flip(struct radeon_device *rdev, int crtc);
+extern void r100_page_flip(struct radeon_device *rdev, int crtc,
+ u64 crtc_base);
+extern bool r100_page_flip_pending(struct radeon_device *rdev, int crtc);
extern void r100_wait_for_vblank(struct radeon_device *rdev, int crtc);
extern int r100_mc_wait_for_idle(struct radeon_device *rdev);
@@ -241,9 +241,9 @@
extern void rs600_pm_misc(struct radeon_device *rdev);
extern void rs600_pm_prepare(struct radeon_device *rdev);
extern void rs600_pm_finish(struct radeon_device *rdev);
-extern void rs600_pre_page_flip(struct radeon_device *rdev, int crtc);
-extern u32 rs600_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
-extern void rs600_post_page_flip(struct radeon_device *rdev, int crtc);
+extern void rs600_page_flip(struct radeon_device *rdev, int crtc,
+ u64 crtc_base);
+extern bool rs600_page_flip_pending(struct radeon_device *rdev, int crtc);
void rs600_set_safe_registers(struct radeon_device *rdev);
extern void avivo_wait_for_vblank(struct radeon_device *rdev, int crtc);
extern int rs600_mc_wait_for_idle(struct radeon_device *rdev);
@@ -387,6 +387,11 @@
int r600_audio_init(struct radeon_device *rdev);
struct r600_audio_pin r600_audio_status(struct radeon_device *rdev);
void r600_audio_fini(struct radeon_device *rdev);
+void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock);
+void r600_hdmi_update_avi_infoframe(struct drm_encoder *encoder, void *buffer,
+ size_t size);
+void r600_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t clock);
+void r600_hdmi_audio_workaround(struct drm_encoder *encoder);
int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder);
void r600_hdmi_update_audio_settings(struct drm_encoder *encoder);
void r600_hdmi_enable(struct drm_encoder *encoder, bool enable);
@@ -447,7 +452,8 @@
int rv770_suspend(struct radeon_device *rdev);
int rv770_resume(struct radeon_device *rdev);
void rv770_pm_misc(struct radeon_device *rdev);
-u32 rv770_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
+void rv770_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
+bool rv770_page_flip_pending(struct radeon_device *rdev, int crtc);
void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
void r700_cp_stop(struct radeon_device *rdev);
void r700_cp_fini(struct radeon_device *rdev);
@@ -458,6 +464,8 @@
u32 rv770_get_xclk(struct radeon_device *rdev);
int rv770_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
int rv770_get_temp(struct radeon_device *rdev);
+/* hdmi */
+void dce3_1_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
/* rv7xx pm */
int rv770_dpm_init(struct radeon_device *rdev);
int rv770_dpm_enable(struct radeon_device *rdev);
@@ -513,9 +521,9 @@
extern void btc_pm_init_profile(struct radeon_device *rdev);
int sumo_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
int evergreen_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
-extern void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc);
-extern u32 evergreen_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
-extern void evergreen_post_page_flip(struct radeon_device *rdev, int crtc);
+extern void evergreen_page_flip(struct radeon_device *rdev, int crtc,
+ u64 crtc_base);
+extern bool evergreen_page_flip_pending(struct radeon_device *rdev, int crtc);
extern void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc);
void evergreen_disable_interrupt_state(struct radeon_device *rdev);
int evergreen_mc_wait_for_idle(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index ea50e0a..4522f7d 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -48,6 +48,7 @@
radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
/* if the connector is already off, don't turn it back on */
+ /* FIXME: This access isn't protected by any locks. */
if (connector->dpms != DRM_MODE_DPMS_ON)
return;
@@ -145,6 +146,31 @@
}
break;
}
+
+ if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
+ /* hdmi deep color only implemented on DCE4+ */
+ if ((bpc > 8) && !ASIC_IS_DCE4(rdev)) {
+ DRM_DEBUG("%s: HDMI deep color %d bpc unsupported. Using 8 bpc.\n",
+ connector->name, bpc);
+ bpc = 8;
+ }
+
+ /*
+ * Pre DCE-8 hw can't handle > 12 bpc, and more than 12 bpc doesn't make
+ * much sense without support for > 12 bpc framebuffers. RGB 4:4:4 at
+ * 12 bpc is always supported on hdmi deep color sinks, as this is
+ * required by the HDMI-1.3 spec. Clamp to a safe 12 bpc maximum.
+ */
+ if (bpc > 12) {
+ DRM_DEBUG("%s: HDMI deep color %d bpc unsupported. Using 12 bpc.\n",
+ connector->name, bpc);
+ bpc = 12;
+ }
+ }
+
+ DRM_DEBUG("%s: Display bpc=%d, returned bpc=%d\n",
+ connector->name, connector->display_info.bpc, bpc);
+
return bpc;
}
@@ -260,13 +286,17 @@
continue;
if (priority == true) {
- DRM_DEBUG_KMS("1: conflicting encoders switching off %s\n", drm_get_connector_name(conflict));
- DRM_DEBUG_KMS("in favor of %s\n", drm_get_connector_name(connector));
+ DRM_DEBUG_KMS("1: conflicting encoders switching off %s\n",
+ conflict->name);
+ DRM_DEBUG_KMS("in favor of %s\n",
+ connector->name);
conflict->status = connector_status_disconnected;
radeon_connector_update_scratch_regs(conflict, connector_status_disconnected);
} else {
- DRM_DEBUG_KMS("2: conflicting encoders switching off %s\n", drm_get_connector_name(connector));
- DRM_DEBUG_KMS("in favor of %s\n", drm_get_connector_name(conflict));
+ DRM_DEBUG_KMS("2: conflicting encoders switching off %s\n",
+ connector->name);
+ DRM_DEBUG_KMS("in favor of %s\n",
+ conflict->name);
current_status = connector_status_disconnected;
}
break;
@@ -787,7 +817,7 @@
if (!radeon_connector->edid) {
DRM_ERROR("%s: probed a monitor but no|invalid EDID\n",
- drm_get_connector_name(connector));
+ connector->name);
ret = connector_status_connected;
} else {
radeon_connector->use_digital = !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL);
@@ -1010,12 +1040,13 @@
if (!radeon_connector->edid) {
DRM_ERROR("%s: probed a monitor but no|invalid EDID\n",
- drm_get_connector_name(connector));
+ connector->name);
/* rs690 seems to have a problem with connectors not existing and always
* return a block of 0's. If we see this just stop polling on this output */
if ((rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) && radeon_connector->base.null_edid_counter) {
ret = connector_status_disconnected;
- DRM_ERROR("%s: detected RS690 floating bus bug, stopping ddc detect\n", drm_get_connector_name(connector));
+ DRM_ERROR("%s: detected RS690 floating bus bug, stopping ddc detect\n",
+ connector->name);
radeon_connector->ddc_bus = NULL;
} else {
ret = connector_status_connected;
@@ -1387,7 +1418,7 @@
struct radeon_device *rdev = dev->dev_private;
if (ASIC_IS_DCE5(rdev) &&
- (rdev->clock.dp_extclk >= 53900) &&
+ (rdev->clock.default_dispclk >= 53900) &&
radeon_connector_encoder_is_hbr2(connector)) {
return true;
}
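
The connector change boils down to a small clamp applied only to HDMI sinks; restated as a standalone helper (hypothetical, same logic as the hunk):

    static int radeon_clamp_hdmi_bpc(struct radeon_device *rdev, int bpc)
    {
            /* hdmi deep color only implemented on DCE4+ */
            if (bpc > 8 && !ASIC_IS_DCE4(rdev))
                    return 8;
            /* pre-DCE8 hw tops out at 12 bpc; 12 bpc RGB 4:4:4 is mandatory
             * for HDMI deep color sinks anyway (HDMI 1.3), so clamp there */
            if (bpc > 12)
                    return 12;
            return bpc;
    }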
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 8d99d5e..e330e76 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -249,16 +249,21 @@
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
drm_crtc_cleanup(crtc);
+ destroy_workqueue(radeon_crtc->flip_queue);
kfree(radeon_crtc);
}
-/*
- * Handle unpin events outside the interrupt handler proper.
+/**
+ * radeon_unpin_work_func - unpin old buffer object
+ *
+ * @__work: kernel work item
+ *
+ * Unpin the old frame buffer object outside of the interrupt handler
*/
static void radeon_unpin_work_func(struct work_struct *__work)
{
- struct radeon_unpin_work *work =
- container_of(__work, struct radeon_unpin_work, work);
+ struct radeon_flip_work *work =
+ container_of(__work, struct radeon_flip_work, unpin_work);
int r;
/* unpin of the old buffer */
@@ -276,33 +281,22 @@
kfree(work);
}
-void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
+void radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id)
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
- struct radeon_unpin_work *work;
+ struct radeon_flip_work *work;
unsigned long flags;
u32 update_pending;
int vpos, hpos;
spin_lock_irqsave(&rdev->ddev->event_lock, flags);
- work = radeon_crtc->unpin_work;
- if (work == NULL ||
- (work->fence && !radeon_fence_signaled(work->fence))) {
+ work = radeon_crtc->flip_work;
+ if (work == NULL) {
spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
return;
}
- /* New pageflip, or just completion of a previous one? */
- if (!radeon_crtc->deferred_flip_completion) {
- /* do the flip (mmio) */
- update_pending = radeon_page_flip(rdev, crtc_id, work->new_crtc_base);
- } else {
- /* This is just a completion of a flip queued in crtc
- * at last invocation. Make sure we go directly to
- * completion routine.
- */
- update_pending = 0;
- radeon_crtc->deferred_flip_completion = 0;
- }
+
+ update_pending = radeon_page_flip_pending(rdev, crtc_id);
/* Has the pageflip already completed in crtc, or is it certain
* to complete in this vblank?
@@ -320,19 +314,38 @@
*/
update_pending = 0;
}
- if (update_pending) {
- /* crtc didn't flip in this target vblank interval,
- * but flip is pending in crtc. It will complete it
- * in next vblank interval, so complete the flip at
- * next vblank irq.
- */
- radeon_crtc->deferred_flip_completion = 1;
+ spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
+ if (!update_pending)
+ radeon_crtc_handle_flip(rdev, crtc_id);
+}
+
+/**
+ * radeon_crtc_handle_flip - page flip completed
+ *
+ * @rdev: radeon device pointer
+ * @crtc_id: crtc number this event is for
+ *
+ * Called when we are sure that a page flip for this crtc is completed.
+ */
+void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
+{
+ struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
+ struct radeon_flip_work *work;
+ unsigned long flags;
+
+ /* this can happen at init */
+ if (radeon_crtc == NULL)
+ return;
+
+ spin_lock_irqsave(&rdev->ddev->event_lock, flags);
+ work = radeon_crtc->flip_work;
+ if (work == NULL) {
spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
return;
}
- /* Pageflip (will be) certainly completed in this vblank. Clean up. */
- radeon_crtc->unpin_work = NULL;
+ /* Pageflip completed. Clean up. */
+ radeon_crtc->flip_work = NULL;
/* wakeup userspace */
if (work->event)
@@ -340,86 +353,71 @@
spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
- drm_vblank_put(rdev->ddev, radeon_crtc->crtc_id);
radeon_fence_unref(&work->fence);
- radeon_post_page_flip(work->rdev, work->crtc_id);
- schedule_work(&work->work);
+ radeon_irq_kms_pflip_irq_get(rdev, work->crtc_id);
+ queue_work(radeon_crtc->flip_queue, &work->unpin_work);
}
-static int radeon_crtc_page_flip(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event,
- uint32_t page_flip_flags)
+/**
+ * radeon_flip_work_func - page flip framebuffer
+ *
+ * @__work: kernel work item
+ *
+ * Wait for the buffer object to become idle and do the actual page flip
+ */
+static void radeon_flip_work_func(struct work_struct *__work)
{
- struct drm_device *dev = crtc->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
- struct radeon_framebuffer *old_radeon_fb;
- struct radeon_framebuffer *new_radeon_fb;
- struct drm_gem_object *obj;
- struct radeon_bo *rbo;
- struct radeon_unpin_work *work;
+ struct radeon_flip_work *work =
+ container_of(__work, struct radeon_flip_work, flip_work);
+ struct radeon_device *rdev = work->rdev;
+ struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[work->crtc_id];
+
+ struct drm_crtc *crtc = &radeon_crtc->base;
+ struct drm_framebuffer *fb = work->fb;
+
+ uint32_t tiling_flags, pitch_pixels;
+ uint64_t base;
+
unsigned long flags;
- u32 tiling_flags, pitch_pixels;
- u64 base;
int r;
- work = kzalloc(sizeof *work, GFP_KERNEL);
- if (work == NULL)
- return -ENOMEM;
+ down_read(&rdev->exclusive_lock);
+ while (work->fence) {
+ r = radeon_fence_wait(work->fence, false);
+ if (r == -EDEADLK) {
+ up_read(&rdev->exclusive_lock);
+ r = radeon_gpu_reset(rdev);
+ down_read(&rdev->exclusive_lock);
+ }
- work->event = event;
- work->rdev = rdev;
- work->crtc_id = radeon_crtc->crtc_id;
- old_radeon_fb = to_radeon_framebuffer(crtc->primary->fb);
- new_radeon_fb = to_radeon_framebuffer(fb);
- /* schedule unpin of the old buffer */
- obj = old_radeon_fb->obj;
- /* take a reference to the old object */
- drm_gem_object_reference(obj);
- rbo = gem_to_radeon_bo(obj);
- work->old_rbo = rbo;
- obj = new_radeon_fb->obj;
- rbo = gem_to_radeon_bo(obj);
-
- spin_lock(&rbo->tbo.bdev->fence_lock);
- if (rbo->tbo.sync_obj)
- work->fence = radeon_fence_ref(rbo->tbo.sync_obj);
- spin_unlock(&rbo->tbo.bdev->fence_lock);
-
- INIT_WORK(&work->work, radeon_unpin_work_func);
-
- /* We borrow the event spin lock for protecting unpin_work */
- spin_lock_irqsave(&dev->event_lock, flags);
- if (radeon_crtc->unpin_work) {
- DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
- r = -EBUSY;
- goto unlock_free;
+ if (r) {
+ DRM_ERROR("failed to wait on page flip fence (%d)!\n",
+ r);
+ goto cleanup;
+ } else
+ radeon_fence_unref(&work->fence);
}
- radeon_crtc->unpin_work = work;
- radeon_crtc->deferred_flip_completion = 0;
- spin_unlock_irqrestore(&dev->event_lock, flags);
/* pin the new buffer */
DRM_DEBUG_DRIVER("flip-ioctl() cur_fbo = %p, cur_bbo = %p\n",
- work->old_rbo, rbo);
+ work->old_rbo, work->new_rbo);
- r = radeon_bo_reserve(rbo, false);
+ r = radeon_bo_reserve(work->new_rbo, false);
if (unlikely(r != 0)) {
DRM_ERROR("failed to reserve new rbo buffer before flip\n");
- goto pflip_cleanup;
+ goto cleanup;
}
/* Only 27 bit offset for legacy CRTC */
- r = radeon_bo_pin_restricted(rbo, RADEON_GEM_DOMAIN_VRAM,
+ r = radeon_bo_pin_restricted(work->new_rbo, RADEON_GEM_DOMAIN_VRAM,
ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27, &base);
if (unlikely(r != 0)) {
- radeon_bo_unreserve(rbo);
+ radeon_bo_unreserve(work->new_rbo);
r = -EINVAL;
DRM_ERROR("failed to pin new rbo buffer before flip\n");
- goto pflip_cleanup;
+ goto cleanup;
}
- radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
- radeon_bo_unreserve(rbo);
+ radeon_bo_get_tiling_flags(work->new_rbo, &tiling_flags, NULL);
+ radeon_bo_unreserve(work->new_rbo);
if (!ASIC_IS_AVIVO(rdev)) {
/* crtc offset is from display base addr not FB location */
@@ -457,44 +455,91 @@
base &= ~7;
}
- spin_lock_irqsave(&dev->event_lock, flags);
- work->new_crtc_base = base;
- spin_unlock_irqrestore(&dev->event_lock, flags);
+ /* We borrow the event spin lock for protecting flip_work */
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
/* update crtc fb */
crtc->primary->fb = fb;
- r = drm_vblank_get(dev, radeon_crtc->crtc_id);
- if (r) {
- DRM_ERROR("failed to get vblank before flip\n");
- goto pflip_cleanup1;
- }
-
/* set the proper interrupt */
- radeon_pre_page_flip(rdev, radeon_crtc->crtc_id);
+ radeon_irq_kms_pflip_irq_get(rdev, radeon_crtc->crtc_id);
- return 0;
+ /* do the flip (mmio) */
+ radeon_page_flip(rdev, radeon_crtc->crtc_id, base);
-pflip_cleanup1:
- if (unlikely(radeon_bo_reserve(rbo, false) != 0)) {
- DRM_ERROR("failed to reserve new rbo in error path\n");
- goto pflip_cleanup;
- }
- if (unlikely(radeon_bo_unpin(rbo) != 0)) {
- DRM_ERROR("failed to unpin new rbo in error path\n");
- }
- radeon_bo_unreserve(rbo);
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+ up_read(&rdev->exclusive_lock);
-pflip_cleanup:
- spin_lock_irqsave(&dev->event_lock, flags);
- radeon_crtc->unpin_work = NULL;
-unlock_free:
- spin_unlock_irqrestore(&dev->event_lock, flags);
- drm_gem_object_unreference_unlocked(old_radeon_fb->obj);
+ return;
+
+cleanup:
+ drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
radeon_fence_unref(&work->fence);
kfree(work);
+ up_read(&rdev->exclusive_lock);
+}
- return r;
+static int radeon_crtc_page_flip(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event,
+ uint32_t page_flip_flags)
+{
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct radeon_framebuffer *old_radeon_fb;
+ struct radeon_framebuffer *new_radeon_fb;
+ struct drm_gem_object *obj;
+ struct radeon_flip_work *work;
+ unsigned long flags;
+
+ work = kzalloc(sizeof *work, GFP_KERNEL);
+ if (work == NULL)
+ return -ENOMEM;
+
+ INIT_WORK(&work->flip_work, radeon_flip_work_func);
+ INIT_WORK(&work->unpin_work, radeon_unpin_work_func);
+
+ work->rdev = rdev;
+ work->crtc_id = radeon_crtc->crtc_id;
+ work->fb = fb;
+ work->event = event;
+
+ /* schedule unpin of the old buffer */
+ old_radeon_fb = to_radeon_framebuffer(crtc->primary->fb);
+ obj = old_radeon_fb->obj;
+
+ /* take a reference to the old object */
+ drm_gem_object_reference(obj);
+ work->old_rbo = gem_to_radeon_bo(obj);
+
+ new_radeon_fb = to_radeon_framebuffer(fb);
+ obj = new_radeon_fb->obj;
+ work->new_rbo = gem_to_radeon_bo(obj);
+
+ spin_lock(&work->new_rbo->tbo.bdev->fence_lock);
+ if (work->new_rbo->tbo.sync_obj)
+ work->fence = radeon_fence_ref(work->new_rbo->tbo.sync_obj);
+ spin_unlock(&work->new_rbo->tbo.bdev->fence_lock);
+
+ /* We borrow the event spin lock for protecting flip_work */
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+
+ if (radeon_crtc->flip_work) {
+ DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+ drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
+ radeon_fence_unref(&work->fence);
+ kfree(work);
+ return -EBUSY;
+ }
+ radeon_crtc->flip_work = work;
+
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+
+ queue_work(radeon_crtc->flip_queue, &work->flip_work);
+
+ return 0;
}
static int
@@ -564,6 +609,7 @@
drm_mode_crtc_set_gamma_size(&radeon_crtc->base, 256);
radeon_crtc->crtc_id = index;
+ radeon_crtc->flip_queue = create_singlethread_workqueue("radeon-crtc");
rdev->mode_info.crtcs[index] = radeon_crtc;
if (rdev->family >= CHIP_BONAIRE) {
@@ -657,7 +703,7 @@
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
radeon_connector = to_radeon_connector(connector);
DRM_INFO("Connector %d:\n", i);
- DRM_INFO(" %s\n", drm_get_connector_name(connector));
+ DRM_INFO(" %s\n", connector->name);
if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
DRM_INFO(" %s\n", hpd_names[radeon_connector->hpd.hpd]);
if (radeon_connector->ddc_bus) {
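Taken together, the radeon_display.c changes above replace the old deferred-flip state machine with a three-stage pipeline per crtc, serialized by the new single-threaded "radeon-crtc" workqueue. A condensed call-flow sketch, using only functions visible in this diff (a reading aid, not compilable on its own):

/*
 * ioctl:   radeon_crtc_page_flip()
 *            -> queue_work(flip_queue, &work->flip_work)
 * worker:  radeon_flip_work_func()
 *            -> radeon_fence_wait() on the new buffer
 *            -> radeon_bo_pin_restricted() the new buffer
 *            -> radeon_page_flip()              (mmio, under event_lock)
 * vblank:  radeon_crtc_handle_vblank()
 *            -> radeon_page_flip_pending()? if clear:
 *            -> radeon_crtc_handle_flip()       send the drm event, then
 *               -> queue_work(flip_queue, &work->unpin_work)
 * worker:  radeon_unpin_work_func()             unpin and unref the old bo
 */

A single-threaded workqueue runs its items one at a time in queue order, so the pin/flip work and the unpin work for one crtc never execute concurrently, while flips on different crtcs proceed independently on their own queues.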
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index 7b94414..427ee4d 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -94,6 +94,8 @@
struct radeon_i2c_bus_rec *rec = &i2c->rec;
uint32_t temp;
+ mutex_lock(&i2c->mutex);
+
/* RV410 appears to have a bug where the hw i2c in reset
* holds the i2c port in a bad state - switch hw i2c away before
* doing DDC - do this for all r200s/r300s/r400s for safety sake
@@ -170,6 +172,8 @@
temp = RREG32(rec->mask_data_reg) & ~rec->mask_data_mask;
WREG32(rec->mask_data_reg, temp);
temp = RREG32(rec->mask_data_reg);
+
+ mutex_unlock(&i2c->mutex);
}
static int get_clock(void *i2c_priv)
@@ -813,6 +817,8 @@
struct radeon_i2c_bus_rec *rec = &i2c->rec;
int ret = 0;
+ mutex_lock(&i2c->mutex);
+
switch (rdev->family) {
case CHIP_R100:
case CHIP_RV100:
@@ -879,6 +885,8 @@
break;
}
+ mutex_unlock(&i2c->mutex);
+
return ret;
}
@@ -919,6 +927,7 @@
i2c->adapter.dev.parent = &dev->pdev->dev;
i2c->dev = dev;
i2c_set_adapdata(&i2c->adapter, i2c);
+ mutex_init(&i2c->mutex);
if (rec->mm_i2c ||
(rec->hw_capable &&
radeon_hw_i2c &&
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 6ddf31a..ea72ad8 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -191,6 +191,7 @@
struct radeon_i2c_bus_rec rec;
struct drm_dp_aux aux;
bool has_aux;
+ struct mutex mutex;
};
/* mostly for macs, but really any system without connector tables */
@@ -324,8 +325,8 @@
struct drm_display_mode native_mode;
int pll_id;
/* page flipping */
- struct radeon_unpin_work *unpin_work;
- int deferred_flip_completion;
+ struct workqueue_struct *flip_queue;
+ struct radeon_flip_work *flip_work;
/* pll sharing */
struct radeon_atom_ss ss;
bool ss_enabled;
@@ -906,6 +907,7 @@
void radeon_fb_output_poll_changed(struct radeon_device *rdev);
+void radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id);
void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id);
int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 19bec0d..95197aa 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -722,7 +722,7 @@
{
int r;
- r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
+ r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL);
if (unlikely(r != 0))
return r;
spin_lock(&bo->tbo.bdev->fence_lock);
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 9e7b25a0..5a873f3 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -65,7 +65,7 @@
{
int r;
- r = ttm_bo_reserve(&bo->tbo, !no_intr, false, false, 0);
+ r = ttm_bo_reserve(&bo->tbo, !no_intr, false, false, NULL);
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS)
dev_err(bo->rdev->dev, "%p reserve failed\n", bo);
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index 2aae6ce..a128a4f 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -658,6 +658,84 @@
}
/**
+ * radeon_vm_frag_ptes - add fragment information to PTEs
+ *
+ * @rdev: radeon_device pointer
+ * @ib: IB for the update
+ * @pe_start: first PTE to handle
+ * @pe_end: last PTE to handle
+ * @addr: addr those PTEs should point to
+ * @flags: hw mapping flags
+ *
+ * Global and local mutex must be locked!
+ */
+static void radeon_vm_frag_ptes(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe_start, uint64_t pe_end,
+ uint64_t addr, uint32_t flags)
+{
+ /**
+ * The MC L1 TLB supports variable sized pages, based on a fragment
+ * field in the PTE. When this field is set to a non-zero value, page
+ * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
+ * flags are considered valid for all PTEs within the fragment range
+ * and corresponding mappings are assumed to be physically contiguous.
+ *
+ * The L1 TLB can store a single PTE for the whole fragment,
+ * significantly increasing the space available for translation
+ * caching. This leads to large improvements in throughput when the
+ * TLB is under pressure.
+ *
+ * The L2 TLB distributes small and large fragments into two
+ * asymmetric partitions. The large fragment cache is significantly
+ * larger. Thus, we try to use large fragments wherever possible.
+ * Userspace can support this by aligning virtual base address and
+ * allocation size to the fragment size.
+ */
+
+ /* NI is optimized for 256KB fragments, SI and newer for 64KB */
+ uint64_t frag_flags = rdev->family == CHIP_CAYMAN ?
+ R600_PTE_FRAG_256KB : R600_PTE_FRAG_64KB;
+ uint64_t frag_align = rdev->family == CHIP_CAYMAN ? 0x200 : 0x80;
+
+ uint64_t frag_start = ALIGN(pe_start, frag_align);
+ uint64_t frag_end = pe_end & ~(frag_align - 1);
+
+ unsigned count;
+
+ /* system pages are not contiguous */
+ if ((flags & R600_PTE_SYSTEM) || !(flags & R600_PTE_VALID) ||
+ (frag_start >= frag_end)) {
+
+ count = (pe_end - pe_start) / 8;
+ radeon_asic_vm_set_page(rdev, ib, pe_start, addr, count,
+ RADEON_GPU_PAGE_SIZE, flags);
+ return;
+ }
+
+ /* handle the 4K area at the beginning */
+ if (pe_start != frag_start) {
+ count = (frag_start - pe_start) / 8;
+ radeon_asic_vm_set_page(rdev, ib, pe_start, addr, count,
+ RADEON_GPU_PAGE_SIZE, flags);
+ addr += RADEON_GPU_PAGE_SIZE * count;
+ }
+
+ /* handle the area in the middle */
+ count = (frag_end - frag_start) / 8;
+ radeon_asic_vm_set_page(rdev, ib, frag_start, addr, count,
+ RADEON_GPU_PAGE_SIZE, flags | frag_flags);
+
+ /* handle the 4K area at the end */
+ if (frag_end != pe_end) {
+ addr += RADEON_GPU_PAGE_SIZE * count;
+ count = (pe_end - frag_end) / 8;
+ radeon_asic_vm_set_page(rdev, ib, frag_end, addr, count,
+ RADEON_GPU_PAGE_SIZE, flags);
+ }
+}
+
+/**
* radeon_vm_update_ptes - make sure that page tables are valid
*
* @rdev: radeon_device pointer
@@ -703,10 +781,9 @@
if ((last_pte + 8 * count) != pte) {
if (count) {
- radeon_asic_vm_set_page(rdev, ib, last_pte,
- last_dst, count,
- RADEON_GPU_PAGE_SIZE,
- flags);
+ radeon_vm_frag_ptes(rdev, ib, last_pte,
+ last_pte + 8 * count,
+ last_dst, flags);
}
count = nptes;
@@ -721,9 +798,9 @@
}
if (count) {
- radeon_asic_vm_set_page(rdev, ib, last_pte,
- last_dst, count,
- RADEON_GPU_PAGE_SIZE, flags);
+ radeon_vm_frag_ptes(rdev, ib, last_pte,
+ last_pte + 8 * count,
+ last_dst, flags);
}
}
@@ -887,6 +964,8 @@
*/
int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
{
+ const unsigned align = min(RADEON_VM_PTB_ALIGN_SIZE,
+ RADEON_VM_PTE_COUNT * 8);
unsigned pd_size, pd_entries, pts_size;
int r;
@@ -908,7 +987,7 @@
return -ENOMEM;
}
- r = radeon_bo_create(rdev, pd_size, RADEON_VM_PTB_ALIGN_SIZE, false,
+ r = radeon_bo_create(rdev, pd_size, align, false,
RADEON_GEM_DOMAIN_VRAM, NULL,
&vm->page_directory);
if (r)
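To make the frag_align arithmetic above concrete: one PTE is 8 bytes and maps one 4KB page, so a 64KB fragment covers 16 PTEs = 0x80 bytes of page table (SI and newer), and a 256KB fragment covers 64 PTEs = 0x200 bytes (Cayman). A standalone sketch of the same head/middle/tail split, with made-up pe_start/pe_end values:

#include <stdint.h>
#include <stdio.h>

/* same rounding as the kernel's ALIGN() */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

int main(void)
{
	/* SI and newer: 64KB fragments -> 16 PTEs * 8 bytes = 0x80 */
	uint64_t frag_align = 0x80;
	/* illustrative PTE window, not taken from real hardware state */
	uint64_t pe_start = 0x1030, pe_end = 0x1310;

	uint64_t frag_start = ALIGN(pe_start, frag_align);
	uint64_t frag_end = pe_end & ~(frag_align - 1);

	printf("head:   %llu PTEs at 4K granularity\n",
	       (unsigned long long)(frag_start - pe_start) / 8);
	printf("middle: %llu PTEs with fragment flags\n",
	       (unsigned long long)(frag_end - frag_start) / 8);
	printf("tail:   %llu PTEs at 4K granularity\n",
	       (unsigned long long)(pe_end - frag_end) / 8);
	return 0;
}

With these inputs the split is 10 head PTEs, 80 fragment PTEs, and 2 tail PTEs, which is exactly the three radeon_asic_vm_set_page() calls the new function issues.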
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 72d3616..0a8be63 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -109,19 +109,7 @@
}
}
-void rs600_pre_page_flip(struct radeon_device *rdev, int crtc)
-{
- /* enable the pflip int */
- radeon_irq_kms_pflip_irq_get(rdev, crtc);
-}
-
-void rs600_post_page_flip(struct radeon_device *rdev, int crtc)
-{
- /* disable the pflip int */
- radeon_irq_kms_pflip_irq_put(rdev, crtc);
-}
-
-u32 rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
+void rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
@@ -148,9 +136,15 @@
/* Unlock the lock, so double-buffering can take place inside vblank */
tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK;
WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
+}
+
+bool rs600_page_flip_pending(struct radeon_device *rdev, int crtc_id)
+{
+ struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
/* Return current update_pending status: */
- return RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING;
+ return !!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) &
+ AVIVO_D1GRPH_SURFACE_UPDATE_PENDING);
}
void avivo_program_fmt(struct drm_encoder *encoder)
@@ -632,12 +626,6 @@
radeon_gart_table_vram_free(rdev);
}
-#define R600_PTE_VALID (1 << 0)
-#define R600_PTE_SYSTEM (1 << 1)
-#define R600_PTE_SNOOPED (1 << 2)
-#define R600_PTE_READABLE (1 << 5)
-#define R600_PTE_WRITEABLE (1 << 6)
-
int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
void __iomem *ptr = (void *)rdev->gart.ptr;
@@ -646,8 +634,7 @@
return -EINVAL;
}
addr = addr & 0xFFFFFFFFFFFFF000ULL;
- addr |= R600_PTE_VALID | R600_PTE_SYSTEM | R600_PTE_SNOOPED;
- addr |= R600_PTE_READABLE | R600_PTE_WRITEABLE;
+ addr |= R600_PTE_GART;
writeq(addr, ptr + (i * 8));
return 0;
}
@@ -787,7 +774,7 @@
wake_up(&rdev->irq.vblank_queue);
}
if (atomic_read(&rdev->irq.pflip[0]))
- radeon_crtc_handle_flip(rdev, 0);
+ radeon_crtc_handle_vblank(rdev, 0);
}
if (G_007EDC_LB_D2_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
if (rdev->irq.crtc_vblank_int[1]) {
@@ -796,7 +783,7 @@
wake_up(&rdev->irq.vblank_queue);
}
if (atomic_read(&rdev->irq.pflip[1]))
- radeon_crtc_handle_flip(rdev, 1);
+ radeon_crtc_handle_vblank(rdev, 1);
}
if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
queue_hotplug = true;
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index fef3107..97b7766 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -801,7 +801,7 @@
return reference_clock;
}
-u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
+void rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
@@ -835,9 +835,15 @@
/* Unlock the lock, so double-buffering can take place inside vblank */
tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK;
WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
+}
+
+bool rv770_page_flip_pending(struct radeon_device *rdev, int crtc_id)
+{
+ struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
/* Return current update_pending status: */
- return RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING;
+ return !!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) &
+ AVIVO_D1GRPH_SURFACE_UPDATE_PENDING);
}
/* get temperature in millidegrees */
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index ac708e0..5c1c0c7 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -4044,18 +4044,21 @@
WREG32(MC_VM_MX_L1_TLB_CNTL,
(0xA << 7) |
ENABLE_L1_TLB |
+ ENABLE_L1_FRAGMENT_PROCESSING |
SYSTEM_ACCESS_MODE_NOT_IN_SYS |
ENABLE_ADVANCED_DRIVER_MODEL |
SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
/* Setup L2 cache */
WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
+ ENABLE_L2_FRAGMENT_PROCESSING |
ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
EFFECTIVE_L2_QUEUE_SIZE(7) |
CONTEXT1_IDENTITY_ACCESS_MODE(1));
WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
- L2_CACHE_BIGK_FRAGMENT_SIZE(0));
+ BANK_SELECT(4) |
+ L2_CACHE_BIGK_FRAGMENT_SIZE(4));
/* setup context0 */
WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
@@ -4092,6 +4095,7 @@
(u32)(rdev->dummy_page.addr >> 12));
WREG32(VM_CONTEXT1_CNTL2, 4);
WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
+ PAGE_TABLE_BLOCK_SIZE(RADEON_VM_BLOCK_SIZE - 9) |
RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
@@ -6146,7 +6150,7 @@
wake_up(&rdev->irq.vblank_queue);
}
if (atomic_read(&rdev->irq.pflip[0]))
- radeon_crtc_handle_flip(rdev, 0);
+ radeon_crtc_handle_vblank(rdev, 0);
rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D1 vblank\n");
}
@@ -6172,7 +6176,7 @@
wake_up(&rdev->irq.vblank_queue);
}
if (atomic_read(&rdev->irq.pflip[1]))
- radeon_crtc_handle_flip(rdev, 1);
+ radeon_crtc_handle_vblank(rdev, 1);
rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D2 vblank\n");
}
@@ -6198,7 +6202,7 @@
wake_up(&rdev->irq.vblank_queue);
}
if (atomic_read(&rdev->irq.pflip[2]))
- radeon_crtc_handle_flip(rdev, 2);
+ radeon_crtc_handle_vblank(rdev, 2);
rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D3 vblank\n");
}
@@ -6224,7 +6228,7 @@
wake_up(&rdev->irq.vblank_queue);
}
if (atomic_read(&rdev->irq.pflip[3]))
- radeon_crtc_handle_flip(rdev, 3);
+ radeon_crtc_handle_vblank(rdev, 3);
rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D4 vblank\n");
}
@@ -6250,7 +6254,7 @@
wake_up(&rdev->irq.vblank_queue);
}
if (atomic_read(&rdev->irq.pflip[4]))
- radeon_crtc_handle_flip(rdev, 4);
+ radeon_crtc_handle_vblank(rdev, 4);
rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D5 vblank\n");
}
@@ -6276,7 +6280,7 @@
wake_up(&rdev->irq.vblank_queue);
}
if (atomic_read(&rdev->irq.pflip[5]))
- radeon_crtc_handle_flip(rdev, 5);
+ radeon_crtc_handle_vblank(rdev, 5);
rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D6 vblank\n");
}
diff --git a/drivers/gpu/drm/radeon/si_dma.c b/drivers/gpu/drm/radeon/si_dma.c
index cf0fdad..9521669 100644
--- a/drivers/gpu/drm/radeon/si_dma.c
+++ b/drivers/gpu/drm/radeon/si_dma.c
@@ -79,7 +79,25 @@
trace_radeon_vm_set_page(pe, addr, count, incr, flags);
- if (flags & R600_PTE_SYSTEM) {
+ if (flags == R600_PTE_GART) {
+ uint64_t src = rdev->gart.table_addr + (addr >> 12) * 8;
+ while (count) {
+ unsigned bytes = count * 8;
+ if (bytes > 0xFFFF8)
+ bytes = 0xFFFF8;
+
+ ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
+ 1, 0, 0, bytes);
+ ib->ptr[ib->length_dw++] = pe & 0xffffffff;
+ ib->ptr[ib->length_dw++] = src & 0xffffffff;
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+ ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff;
+
+ pe += bytes;
+ src += bytes;
+ count -= bytes / 8;
+ }
+ } else if (flags & R600_PTE_SYSTEM) {
while (count) {
ndw = count * 2;
if (ndw > 0xFFFFE)
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index 683532f..da8f867 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -362,6 +362,7 @@
#define READ_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 16)
#define WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 18)
#define WRITE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 19)
+#define PAGE_TABLE_BLOCK_SIZE(x) (((x) & 0xF) << 24)
#define VM_CONTEXT1_CNTL 0x1414
#define VM_CONTEXT0_CNTL2 0x1430
#define VM_CONTEXT1_CNTL2 0x1434
diff --git a/drivers/gpu/drm/vmwgfx/Kconfig b/drivers/gpu/drm/vmwgfx/Kconfig
index b71bcd0..67720f7 100644
--- a/drivers/gpu/drm/vmwgfx/Kconfig
+++ b/drivers/gpu/drm/vmwgfx/Kconfig
@@ -1,11 +1,14 @@
config DRM_VMWGFX
tristate "DRM driver for VMware Virtual GPU"
- depends on DRM && PCI && FB
+ depends on DRM && PCI
select FB_DEFERRED_IO
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select DRM_TTM
+ # Only needed for the transitional use of drm_crtc_init - can be removed
+ # again once vmwgfx sets up the primary plane itself.
+ select DRM_KMS_HELPER
help
Choose this option if you would like to run 3D acceleration
in a VMware virtual machine.
@@ -14,7 +17,7 @@
The compiled module will be called "vmwgfx.ko".
config DRM_VMWGFX_FBCON
- depends on DRM_VMWGFX
+ depends on DRM_VMWGFX && FB
bool "Enable framebuffer console under vmwgfx by default"
help
Choose this option if you are shipping a new vmwgfx
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
index ec0ae2d..6866448 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -623,7 +623,8 @@
ret = dev->bus->pm->runtime_suspend(dev);
if (ret)
return ret;
-
+ if (vgasr_priv.handler->switchto)
+ vgasr_priv.handler->switchto(VGA_SWITCHEROO_IGD);
vga_switcheroo_power_switch(pdev, VGA_SWITCHEROO_OFF);
return 0;
}
diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c
index 53ff005..6543b34 100644
--- a/drivers/staging/imx-drm/imx-drm-core.c
+++ b/drivers/staging/imx-drm/imx-drm-core.c
@@ -298,7 +298,7 @@
dev_err(drm->dev,
"[CONNECTOR:%d:%s] drm_sysfs_connector_add failed: %d\n",
connector->base.id,
- drm_get_connector_name(connector), ret);
+ connector->name, ret);
goto err_unbind;
}
}
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 12f10bc..76ccaab 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -1024,14 +1024,17 @@
};
struct drm_vblank_crtc {
+ struct drm_device *dev; /* pointer to the drm_device */
wait_queue_head_t queue; /**< VBLANK wait queue */
struct timeval time[DRM_VBLANKTIME_RBSIZE]; /**< timestamp of current count */
+ struct timer_list disable_timer; /* delayed disable timer */
atomic_t count; /**< number of VBLANK interrupts */
atomic_t refcount; /* number of users of vblank interrupts per crtc */
u32 last; /* protected by dev->vbl_lock, used */
/* for wraparound handling */
u32 last_wait; /* Last vblank seqno waited per CRTC */
unsigned int inmodeset; /* Display driver is setting mode */
+ int crtc; /* crtc index */
bool enabled; /* so we don't call enable more than
once per disable */
};
@@ -1119,7 +1122,6 @@
spinlock_t vblank_time_lock; /**< Protects vblank count and time updates during vblank enable/disable */
spinlock_t vbl_lock;
- struct timer_list vblank_disable_timer;
u32 max_vblank_count; /**< size of vblank counter register */
@@ -1357,8 +1359,14 @@
extern bool drm_handle_vblank(struct drm_device *dev, int crtc);
extern int drm_vblank_get(struct drm_device *dev, int crtc);
extern void drm_vblank_put(struct drm_device *dev, int crtc);
+extern int drm_crtc_vblank_get(struct drm_crtc *crtc);
+extern void drm_crtc_vblank_put(struct drm_crtc *crtc);
extern void drm_vblank_off(struct drm_device *dev, int crtc);
+extern void drm_vblank_on(struct drm_device *dev, int crtc);
+extern void drm_crtc_vblank_off(struct drm_crtc *crtc);
+extern void drm_crtc_vblank_on(struct drm_crtc *crtc);
extern void drm_vblank_cleanup(struct drm_device *dev);
+
extern u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
struct timeval *tvblank, unsigned flags);
extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
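The new drm_crtc_vblank_get/put/on/off entry points are thin wrappers that derive the crtc index from a struct drm_crtc instead of taking a bare index. A minimal sketch of how a driver's modeset hooks might pair on/off (foo_crtc_enable/disable are hypothetical; only the drm_crtc_vblank_* calls come from this diff):

/* hypothetical driver hooks; only the drm_crtc_vblank_* calls are real */
static void foo_crtc_enable(struct drm_crtc *crtc)
{
	/* ... program timings and start scanout ... */
	drm_crtc_vblank_on(crtc);	/* accept vblank waiters again */
}

static void foo_crtc_disable(struct drm_crtc *crtc)
{
	drm_crtc_vblank_off(crtc);	/* flush out pending vblank waiters */
	/* ... stop scanout ... */
}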
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 5c1c31c..6c295df 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -50,6 +50,7 @@
#define DRM_MODE_OBJECT_BLOB 0xbbbbbbbb
#define DRM_MODE_OBJECT_PLANE 0xeeeeeeee
#define DRM_MODE_OBJECT_BRIDGE 0xbdbdbdbd
+#define DRM_MODE_OBJECT_ANY 0
struct drm_mode_object {
uint32_t id;
@@ -64,6 +65,15 @@
uint64_t values[DRM_OBJECT_MAX_PROPERTY];
};
+static inline int64_t U642I64(uint64_t val)
+{
+ return (int64_t)*((int64_t *)&val);
+}
+static inline uint64_t I642U64(int64_t val)
+{
+ return (uint64_t)*((uint64_t *)&val);
+}
+
enum drm_connector_force {
DRM_FORCE_UNSPECIFIED,
DRM_FORCE_OFF,
@@ -190,6 +200,7 @@
char name[DRM_PROP_NAME_LEN];
uint32_t num_values;
uint64_t *values;
+ struct drm_device *dev;
struct list_head enum_blob_list;
};
@@ -727,6 +738,7 @@
*/
struct drm_mode_config {
struct mutex mutex; /* protects configuration (mode lists etc.) */
+ struct mutex connection_mutex; /* protects connector->encoder and encoder->crtc links */
struct mutex idr_mutex; /* for IDR management */
struct idr crtc_idr; /* use this idr for all IDs, fb, crtc, connector, modes - just makes life easier */
/* this is limited to one for now */
@@ -909,7 +921,6 @@
extern void drm_encoder_cleanup(struct drm_encoder *encoder);
-extern const char *drm_get_connector_name(const struct drm_connector *connector);
extern const char *drm_get_connector_status_name(enum drm_connector_status status);
extern const char *drm_get_subpixel_order_name(enum subpixel_order order);
extern const char *drm_get_dpms_name(int val);
@@ -931,6 +942,23 @@
extern int drm_mode_connector_update_edid_property(struct drm_connector *connector,
struct edid *edid);
+
+static inline bool drm_property_type_is(struct drm_property *property,
+ uint32_t type)
+{
+ /* instanceof for props.. handles extended type vs original types: */
+ if (property->flags & DRM_MODE_PROP_EXTENDED_TYPE)
+ return (property->flags & DRM_MODE_PROP_EXTENDED_TYPE) == type;
+ return property->flags & type;
+}
+
+static inline bool drm_property_type_valid(struct drm_property *property)
+{
+ if (property->flags & DRM_MODE_PROP_EXTENDED_TYPE)
+ return !(property->flags & DRM_MODE_PROP_LEGACY_TYPE);
+ return !!(property->flags & DRM_MODE_PROP_LEGACY_TYPE);
+}
+
extern int drm_object_property_set_value(struct drm_mode_object *obj,
struct drm_property *property,
uint64_t val);
@@ -964,6 +992,11 @@
struct drm_property *drm_property_create_range(struct drm_device *dev, int flags,
const char *name,
uint64_t min, uint64_t max);
+struct drm_property *drm_property_create_signed_range(struct drm_device *dev,
+ int flags, const char *name,
+ int64_t min, int64_t max);
+struct drm_property *drm_property_create_object(struct drm_device *dev,
+ int flags, const char *name, uint32_t type);
extern void drm_property_destroy(struct drm_device *dev, struct drm_property *property);
extern int drm_property_add_enum(struct drm_property *property, int index,
uint64_t value, const char *name);
@@ -972,7 +1005,6 @@
char *formats[]);
extern int drm_mode_create_scaling_mode_property(struct drm_device *dev);
extern int drm_mode_create_dirty_info_property(struct drm_device *dev);
-extern const char *drm_get_encoder_name(const struct drm_encoder *encoder);
extern int drm_mode_connector_attach_encoder(struct drm_connector *connector,
struct drm_encoder *encoder);
@@ -980,6 +1012,7 @@
int gamma_size);
extern struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
uint32_t id, uint32_t type);
+
/* IOCTLs */
extern int drm_mode_getresources(struct drm_device *dev,
void *data, struct drm_file *file_priv);
@@ -1063,6 +1096,15 @@
extern const char *drm_get_format_name(uint32_t format);
/* Helpers */
+
+static inline struct drm_plane *drm_plane_find(struct drm_device *dev,
+ uint32_t id)
+{
+ struct drm_mode_object *mo;
+ mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_PLANE);
+ return mo ? obj_to_plane(mo) : NULL;
+}
+
static inline struct drm_crtc *drm_crtc_find(struct drm_device *dev,
uint32_t id)
{
@@ -1079,6 +1121,30 @@
return mo ? obj_to_encoder(mo) : NULL;
}
+static inline struct drm_connector *drm_connector_find(struct drm_device *dev,
+ uint32_t id)
+{
+ struct drm_mode_object *mo;
+ mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_CONNECTOR);
+ return mo ? obj_to_connector(mo) : NULL;
+}
+
+static inline struct drm_property *drm_property_find(struct drm_device *dev,
+ uint32_t id)
+{
+ struct drm_mode_object *mo;
+ mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_PROPERTY);
+ return mo ? obj_to_property(mo) : NULL;
+}
+
+static inline struct drm_property_blob *
+drm_property_blob_find(struct drm_device *dev, uint32_t id)
+{
+ struct drm_mode_object *mo;
+ mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_BLOB);
+ return mo ? obj_to_blob(mo) : NULL;
+}
+
/* Plane list iterator for legacy (overlay only) planes. */
#define drm_for_each_legacy_plane(plane, planelist) \
list_for_each_entry(plane, planelist, head) \
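The new lookup helpers above all funnel through drm_mode_object_find() with the matching DRM_MODE_OBJECT_* type and return NULL on a miss, so callers get a one-liner instead of open-coded casts. A hedged ioctl-side sketch (plane_id and the error path are illustrative):

	/* plane_id would come from a userspace request (illustrative) */
	struct drm_plane *plane = drm_plane_find(dev, plane_id);
	if (!plane) {
		DRM_DEBUG_KMS("unknown plane id %u\n", plane_id);
		return -ENOENT;
	}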
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index a1441c5..b96031d 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -202,6 +202,11 @@
#define DRM_EDID_FEATURE_PM_SUSPEND (1 << 6)
#define DRM_EDID_FEATURE_PM_STANDBY (1 << 7)
+#define DRM_EDID_HDMI_DC_48 (1 << 6)
+#define DRM_EDID_HDMI_DC_36 (1 << 5)
+#define DRM_EDID_HDMI_DC_30 (1 << 4)
+#define DRM_EDID_HDMI_DC_Y444 (1 << 3)
+
struct edid {
u8 header[8];
/* Vendor & product info */
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index 24f3cad..d18f31a 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -245,4 +245,10 @@
INTEL_BDW_GT12D_IDS(info), \
INTEL_BDW_GT3D_IDS(info)
+#define INTEL_CHV_IDS(info) \
+ INTEL_VGA_DEVICE(0x22b0, info), \
+ INTEL_VGA_DEVICE(0x22b1, info), \
+ INTEL_VGA_DEVICE(0x22b2, info), \
+ INTEL_VGA_DEVICE(0x22b3, info)
+
#endif /* _I915_PCIIDS_H */
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index 719add4..def54f9 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -252,6 +252,21 @@
#define DRM_MODE_PROP_BLOB (1<<4)
#define DRM_MODE_PROP_BITMASK (1<<5) /* bitmask of enumerated types */
+/* non-extended types: legacy bitmask, one bit per type: */
+#define DRM_MODE_PROP_LEGACY_TYPE ( \
+ DRM_MODE_PROP_RANGE | \
+ DRM_MODE_PROP_ENUM | \
+ DRM_MODE_PROP_BLOB | \
+ DRM_MODE_PROP_BITMASK)
+
+/* extended-types: rather than continue to consume a bit per type,
+ * grab a chunk of the bits to use as integer type id.
+ */
+#define DRM_MODE_PROP_EXTENDED_TYPE 0x0000ffc0
+#define DRM_MODE_PROP_TYPE(n) ((n) << 6)
+#define DRM_MODE_PROP_OBJECT DRM_MODE_PROP_TYPE(1)
+#define DRM_MODE_PROP_SIGNED_RANGE DRM_MODE_PROP_TYPE(2)
+
struct drm_mode_property_enum {
__u64 value;
char name[DRM_PROP_NAME_LEN];
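Concretely, the extended-type field occupies bits 6..15 while the legacy one-bit-per-type flags stay in bits 0..5, so DRM_MODE_PROP_TYPE(1) == 0x40 (object) and DRM_MODE_PROP_TYPE(2) == 0x80 (signed range). A standalone check of the encoding, with the macros copied from the hunk above:

#include <assert.h>
#include <stdint.h>

#define DRM_MODE_PROP_EXTENDED_TYPE	0x0000ffc0
#define DRM_MODE_PROP_TYPE(n)		((n) << 6)
#define DRM_MODE_PROP_OBJECT		DRM_MODE_PROP_TYPE(1)
#define DRM_MODE_PROP_SIGNED_RANGE	DRM_MODE_PROP_TYPE(2)

int main(void)
{
	/* legacy bits 0..5 are untouched, so old userspace masks still work */
	uint32_t flags = DRM_MODE_PROP_SIGNED_RANGE;

	assert((flags & DRM_MODE_PROP_EXTENDED_TYPE) == DRM_MODE_PROP_TYPE(2));
	assert(DRM_MODE_PROP_OBJECT == 0x40 && DRM_MODE_PROP_SIGNED_RANGE == 0x80);
	return 0;
}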
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 8a3e4ef00..ff57f07 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -223,6 +223,7 @@
#define DRM_I915_GEM_GET_CACHING 0x30
#define DRM_I915_REG_READ 0x31
#define DRM_I915_GET_RESET_STATS 0x32
+#define DRM_I915_GEM_USERPTR 0x33
#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -273,6 +274,7 @@
#define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy)
#define DRM_IOCTL_I915_REG_READ DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read)
#define DRM_IOCTL_I915_GET_RESET_STATS DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GET_RESET_STATS, struct drm_i915_reset_stats)
+#define DRM_IOCTL_I915_GEM_USERPTR DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_USERPTR, struct drm_i915_gem_userptr)
/* Allow drivers to submit batchbuffers directly to hardware, relying
* on the security mechanisms provided by hardware.
@@ -1050,4 +1052,18 @@
__u32 pad;
};
+struct drm_i915_gem_userptr {
+ __u64 user_ptr;
+ __u64 user_size;
+ __u32 flags;
+#define I915_USERPTR_READ_ONLY 0x1
+#define I915_USERPTR_UNSYNCHRONIZED 0x80000000
+ /**
+ * Returned handle for the object.
+ *
+ * Object handles are nonzero.
+ */
+ __u32 handle;
+};
+
#endif /* _UAPI_I915_DRM_H_ */
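To show the new userptr uapi from the other side, a hedged userspace sketch of the ioctl (the DRM fd setup and the page alignment of the buffer are assumed; error handling is trimmed):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>	/* uapi header as installed; path is an assumption */

/* Wrap an existing, page-aligned user allocation in a GEM handle.
 * fd is an open DRM fd, e.g. a render node (assumption); returns 0 on
 * error, since valid handles are nonzero per the struct comment above.
 */
static uint32_t gem_userptr(int fd, void *ptr, uint64_t size)
{
	struct drm_i915_gem_userptr arg;

	memset(&arg, 0, sizeof(arg));
	arg.user_ptr = (uintptr_t)ptr;	/* must be page aligned */
	arg.user_size = size;		/* must be a multiple of the page size */
	arg.flags = 0;			/* or I915_USERPTR_READ_ONLY */

	if (ioctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &arg))
		return 0;
	return arg.handle;
}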