Merge branch 'drm-intel-next' of git://git.kernel.org/pub/scm/linux/kernel/git/anholt/drm-intel

* 'drm-intel-next' of git://git.kernel.org/pub/scm/linux/kernel/git/anholt/drm-intel: (41 commits)
  drm/i915: add HAS_BSD check to i915_getparam
  drm/i915: Honor sync polarity from VBT panel timing descriptors
  drm/i915: Unmask interrupt for render engine on Sandybridge
  drm/i915: Fix PIPE_CONTROL command on Sandybridge
  drm/i915: Fix up address spaces in slow_kernel_write()
  drm/i915: Use non-atomic kmap for slow copy paths
  drm/i915: Avoid moving from CPU domain during pwrite
  drm/i915: Cleanup after failed initialization of ringbuffers
  drm/i915: Reject bind_to_gtt() early if object > aperture
  drm/i915: Check error code whilst moving buffer to GTT domain.
  drm/i915: Remove spurious warning "Failure to install fence"
  drm/i915: Rebind bo if currently bound with incorrect alignment.
  drm/i915: Include pitch in set_base debug statement.
  drm/i915: Only print "nothing to do" debug message as required.
  drm/i915: Propagate error from unbinding an unfenceable object.
  drm/i915: Avoid nesting of domain changes when setting display plane
  drm/i915: Hold the spinlock whilst resetting unpin_work along error path
  drm/i915: Only print a message if there was an error
  drm/i915: Clean up leftover bits from hws move to ring structure.
  drm/i915: Add CxSR support on Pineview DDR3
  ...
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index e8ea682..9344216 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -1059,7 +1059,7 @@
 	}
 }
 
-static int intel_i915_configure(void)
+static int intel_i9xx_configure(void)
 {
 	struct aper_size_info_fixed *current_size;
 	u32 temp;
@@ -1207,6 +1207,38 @@
 	return 0;
 }
 
+static int intel_i915_get_gtt_size(void)
+{
+	int size;
+
+	if (IS_G33) {
+		u16 gmch_ctrl;
+
+		/* G33's GTT size defined in gmch_ctrl */
+		pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
+		switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) {
+		case G33_PGETBL_SIZE_1M:
+			size = 1024;
+			break;
+		case G33_PGETBL_SIZE_2M:
+			size = 2048;
+			break;
+		default:
+			dev_info(&agp_bridge->dev->dev,
+				 "unknown page table size 0x%x, assuming 512KB\n",
+				(gmch_ctrl & G33_PGETBL_SIZE_MASK));
+			size = 512;
+		}
+	} else {
+		/* On previous hardware, the GTT size was just what was
+		 * required to map the aperture.
+		 */
+		size = agp_bridge->driver->fetch_size();
+	}
+
+	return KB(size);
+}
+
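For reference, the G33 decode above as a standalone, user-space sketch; the
KB() helper and the G33_PGETBL_* encodings are assumed to match intel-agp.h:

    #include <stdio.h>

    #define KB(x)                ((x) * 1024)  /* assumed to match agp.h */
    #define G33_PGETBL_SIZE_MASK (3 << 8)      /* assumed field layout */
    #define G33_PGETBL_SIZE_1M   (1 << 8)
    #define G33_PGETBL_SIZE_2M   (2 << 8)

    /* Mirrors intel_i915_get_gtt_size(): on G33 the GTT size comes from
     * GMCH_CTRL instead of being derived from the aperture size. */
    static int g33_gtt_bytes(unsigned short gmch_ctrl)
    {
            switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) {
            case G33_PGETBL_SIZE_1M:
                    return KB(1024);
            case G33_PGETBL_SIZE_2M:
                    return KB(2048);
            default:
                    return KB(512); /* unknown encoding: assume 512KB */
            }
    }

    int main(void)
    {
            printf("1M encoding -> %d bytes\n",
                   g33_gtt_bytes(G33_PGETBL_SIZE_1M));
            return 0;
    }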
 /* The intel i915 automatically initializes the agp aperture during POST.
  * Use the memory already set aside for it in the GTT.
  */
@@ -1216,7 +1248,7 @@
 	struct aper_size_info_fixed *size;
 	int num_entries;
 	u32 temp, temp2;
-	int gtt_map_size = 256 * 1024;
+	int gtt_map_size;
 
 	size = agp_bridge->current_size;
 	page_order = size->page_order;
@@ -1226,8 +1258,8 @@
 	pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp);
 	pci_read_config_dword(intel_private.pcidev, I915_PTEADDR, &temp2);
 
-	if (IS_G33)
-	    gtt_map_size = 1024 * 1024; /* 1M on G33 */
+	gtt_map_size = intel_i915_get_gtt_size();
+
 	intel_private.gtt = ioremap(temp2, gtt_map_size);
 	if (!intel_private.gtt)
 		return -ENOMEM;
@@ -1422,7 +1454,7 @@
 	.size_type		= FIXED_APER_SIZE,
 	.num_aperture_sizes	= 4,
 	.needs_scratch_page	= true,
-	.configure		= intel_i915_configure,
+	.configure		= intel_i9xx_configure,
 	.fetch_size		= intel_i9xx_fetch_size,
 	.cleanup		= intel_i915_cleanup,
 	.mask_memory		= intel_i810_mask_memory,
@@ -1455,7 +1487,7 @@
 	.size_type		= FIXED_APER_SIZE,
 	.num_aperture_sizes	= 4,
 	.needs_scratch_page	= true,
-	.configure		= intel_i915_configure,
+	.configure		= intel_i9xx_configure,
 	.fetch_size		= intel_i9xx_fetch_size,
 	.cleanup		= intel_i915_cleanup,
 	.mask_memory		= intel_i965_mask_memory,
@@ -1488,7 +1520,7 @@
 	.size_type		= FIXED_APER_SIZE,
 	.num_aperture_sizes	= 4,
 	.needs_scratch_page	= true,
-	.configure		= intel_i915_configure,
+	.configure		= intel_i9xx_configure,
 	.fetch_size		= intel_i9xx_fetch_size,
 	.cleanup		= intel_i915_cleanup,
 	.mask_memory		= intel_i965_mask_memory,
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 9563901..da78f2c 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -22,6 +22,7 @@
 	  intel_fb.o \
 	  intel_tv.o \
 	  intel_dvo.o \
+	  intel_ringbuffer.o \
 	  intel_overlay.o \
 	  dvo_ch7xxx.o \
 	  dvo_ch7017.o \
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 322070c..52510ad 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -77,7 +77,7 @@
 	case ACTIVE_LIST:
 		seq_printf(m, "Active:\n");
 		lock = &dev_priv->mm.active_list_lock;
-		head = &dev_priv->mm.active_list;
+		head = &dev_priv->render_ring.active_list;
 		break;
 	case INACTIVE_LIST:
 		seq_printf(m, "Inactive:\n");
@@ -129,7 +129,8 @@
 	struct drm_i915_gem_request *gem_request;
 
 	seq_printf(m, "Request:\n");
-	list_for_each_entry(gem_request, &dev_priv->mm.request_list, list) {
+	list_for_each_entry(gem_request, &dev_priv->render_ring.request_list,
+			list) {
 		seq_printf(m, "    %d @ %d\n",
 			   gem_request->seqno,
 			   (int) (jiffies - gem_request->emitted_jiffies));
@@ -143,9 +144,9 @@
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 
-	if (dev_priv->hw_status_page != NULL) {
+	if (dev_priv->render_ring.status_page.page_addr != NULL) {
 		seq_printf(m, "Current sequence: %d\n",
-			   i915_get_gem_seqno(dev));
 			   i915_get_gem_seqno(dev, &dev_priv->render_ring));
 	} else {
 		seq_printf(m, "Current sequence: hws uninitialized\n");
 	}
@@ -195,9 +196,9 @@
 	}
 	seq_printf(m, "Interrupts received: %d\n",
 		   atomic_read(&dev_priv->irq_received));
-	if (dev_priv->hw_status_page != NULL) {
+	if (dev_priv->render_ring.status_page.page_addr != NULL) {
 		seq_printf(m, "Current sequence:    %d\n",
-			   i915_get_gem_seqno(dev));
 			   i915_get_gem_seqno(dev, &dev_priv->render_ring));
 	} else {
 		seq_printf(m, "Current sequence:    hws uninitialized\n");
 	}
@@ -251,7 +252,7 @@
 	int i;
 	volatile u32 *hws;
 
-	hws = (volatile u32 *)dev_priv->hw_status_page;
+	hws = (volatile u32 *)dev_priv->render_ring.status_page.page_addr;
 	if (hws == NULL)
 		return 0;
 
@@ -287,7 +288,8 @@
 
 	spin_lock(&dev_priv->mm.active_list_lock);
 
-	list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
+	list_for_each_entry(obj_priv, &dev_priv->render_ring.active_list,
+			list) {
 		obj = &obj_priv->base;
 		if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
 		    ret = i915_gem_object_get_pages(obj, 0);
@@ -317,14 +319,14 @@
 	u8 *virt;
 	uint32_t *ptr, off;
 
-	if (!dev_priv->ring.ring_obj) {
+	if (!dev_priv->render_ring.gem_object) {
 		seq_printf(m, "No ringbuffer setup\n");
 		return 0;
 	}
 
-	virt = dev_priv->ring.virtual_start;
+	virt = dev_priv->render_ring.virtual_start;
 
-	for (off = 0; off < dev_priv->ring.Size; off += 4) {
+	for (off = 0; off < dev_priv->render_ring.size; off += 4) {
 		ptr = (uint32_t *)(virt + off);
 		seq_printf(m, "%08x :  %08x\n", off, *ptr);
 	}
@@ -344,7 +346,7 @@
 
 	seq_printf(m, "RingHead :  %08x\n", head);
 	seq_printf(m, "RingTail :  %08x\n", tail);
-	seq_printf(m, "RingSize :  %08lx\n", dev_priv->ring.Size);
+	seq_printf(m, "RingSize :  %08lx\n", dev_priv->render_ring.size);
 	seq_printf(m, "Acthd :     %08x\n", I915_READ(IS_I965G(dev) ? ACTHD_I965 : ACTHD));
 
 	return 0;
@@ -489,11 +491,14 @@
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	u16 rgvswctl = I915_READ16(MEMSWCTL);
+	u16 rgvstat = I915_READ16(MEMSTAT_ILK);
 
-	seq_printf(m, "Last command: 0x%01x\n", (rgvswctl >> 13) & 0x3);
-	seq_printf(m, "Command status: %d\n", (rgvswctl >> 12) & 1);
-	seq_printf(m, "P%d DELAY 0x%02x\n", (rgvswctl >> 8) & 0xf,
-		   rgvswctl & 0x3f);
+	seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
+	seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
+	seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
+		   MEMSTAT_VID_SHIFT);
+	seq_printf(m, "Current P-state: %d\n",
+		   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
 
 	return 0;
 }
@@ -508,7 +513,8 @@
 
 	for (i = 0; i < 16; i++) {
 		delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
-		seq_printf(m, "P%02dVIDFREQ: 0x%08x\n", i, delayfreq);
+		seq_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq,
+			   (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT);
 	}
 
 	return 0;
@@ -541,6 +547,8 @@
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	u32 rgvmodectl = I915_READ(MEMMODECTL);
+	u32 rstdbyctl = I915_READ(MCHBAR_RENDER_STANDBY);
+	u16 crstandvid = I915_READ16(CRSTANDVID);
 
 	seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
 		   "yes" : "no");
@@ -555,9 +563,13 @@
 		   rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no");
 	seq_printf(m, "Starting frequency: P%d\n",
 		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
-	seq_printf(m, "Max frequency: P%d\n",
+	seq_printf(m, "Max P-state: P%d\n",
 		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
-	seq_printf(m, "Min frequency: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
+	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
+	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
+	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
+	seq_printf(m, "Render standby enabled: %s\n",
+		   (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes");
 
 	return 0;
 }
@@ -621,6 +633,36 @@
 	return 0;
 }
 
+static int i915_emon_status(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	unsigned long temp, chipset, gfx;
+
+	temp = i915_mch_val(dev_priv);
+	chipset = i915_chipset_val(dev_priv);
+	gfx = i915_gfx_val(dev_priv);
+
+	seq_printf(m, "GMCH temp: %ld\n", temp);
+	seq_printf(m, "Chipset power: %ld\n", chipset);
+	seq_printf(m, "GFX power: %ld\n", gfx);
+	seq_printf(m, "Total power: %ld\n", chipset + gfx);
+
+	return 0;
+}
+
+static int i915_gfxec(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));
+
+	return 0;
+}
+
 static int
 i915_wedged_open(struct inode *inode,
 		 struct file *filp)
@@ -743,6 +785,8 @@
 	{"i915_delayfreq_table", i915_delayfreq_table, 0},
 	{"i915_inttoext_table", i915_inttoext_table, 0},
 	{"i915_drpc_info", i915_drpc_info, 0},
+	{"i915_emon_status", i915_emon_status, 0},
+	{"i915_gfxec", i915_gfxec, 0},
 	{"i915_fbc_status", i915_fbc_status, 0},
 	{"i915_sr_status", i915_sr_status, 0},
 };
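Both new nodes are read-only text files; once debugfs is mounted they can be
consumed like any other file. A minimal sketch (the dri minor number 0 is an
assumption):

    #include <stdio.h>

    int main(void)
    {
            char line[256];
            FILE *f = fopen("/sys/kernel/debug/dri/0/i915_emon_status", "r");

            if (!f)
                    return 1;
            while (fgets(line, sizeof(line), f))
                    fputs(line, stdout); /* GMCH temp, chipset/GFX power */
            fclose(f);
            return 0;
    }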
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 2a6b5de..84ce956 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -40,84 +40,6 @@
 #include <linux/vga_switcheroo.h>
 #include <linux/slab.h>
 
-/* Really want an OS-independent resettable timer.  Would like to have
- * this loop run for (eg) 3 sec, but have the timer reset every time
- * the head pointer changes, so that EBUSY only happens if the ring
- * actually stalls for (eg) 3 seconds.
- */
-int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
-	u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
-	u32 last_acthd = I915_READ(acthd_reg);
-	u32 acthd;
-	u32 last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
-	int i;
-
-	trace_i915_ring_wait_begin (dev);
-
-	for (i = 0; i < 100000; i++) {
-		ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
-		acthd = I915_READ(acthd_reg);
-		ring->space = ring->head - (ring->tail + 8);
-		if (ring->space < 0)
-			ring->space += ring->Size;
-		if (ring->space >= n) {
-			trace_i915_ring_wait_end (dev);
-			return 0;
-		}
-
-		if (dev->primary->master) {
-			struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
-			if (master_priv->sarea_priv)
-				master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
-		}
-
-
-		if (ring->head != last_head)
-			i = 0;
-		if (acthd != last_acthd)
-			i = 0;
-
-		last_head = ring->head;
-		last_acthd = acthd;
-		msleep_interruptible(10);
-
-	}
-
-	trace_i915_ring_wait_end (dev);
-	return -EBUSY;
-}
-
-/* As a ringbuffer is only allowed to wrap between instructions, fill
- * the tail with NOOPs.
- */
-int i915_wrap_ring(struct drm_device *dev)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	volatile unsigned int *virt;
-	int rem;
-
-	rem = dev_priv->ring.Size - dev_priv->ring.tail;
-	if (dev_priv->ring.space < rem) {
-		int ret = i915_wait_ring(dev, rem, __func__);
-		if (ret)
-			return ret;
-	}
-	dev_priv->ring.space -= rem;
-
-	virt = (unsigned int *)
-		(dev_priv->ring.virtual_start + dev_priv->ring.tail);
-	rem /= 4;
-	while (rem--)
-		*virt++ = MI_NOOP;
-
-	dev_priv->ring.tail = 0;
-
-	return 0;
-}
-
 /**
  * Sets up the hardware status page for devices that need a physical address
  * in the register.
@@ -133,10 +55,11 @@
 		DRM_ERROR("Can not allocate hardware status page\n");
 		return -ENOMEM;
 	}
-	dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
+	dev_priv->render_ring.status_page.page_addr
+		= dev_priv->status_page_dmah->vaddr;
 	dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
 
-	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
+	memset(dev_priv->render_ring.status_page.page_addr, 0, PAGE_SIZE);
 
 	if (IS_I965G(dev))
 		dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) &
@@ -159,8 +82,8 @@
 		dev_priv->status_page_dmah = NULL;
 	}
 
-	if (dev_priv->status_gfx_addr) {
-		dev_priv->status_gfx_addr = 0;
+	if (dev_priv->render_ring.status_page.gfx_addr) {
+		dev_priv->render_ring.status_page.gfx_addr = 0;
 		drm_core_ioremapfree(&dev_priv->hws_map, dev);
 	}
 
@@ -172,7 +95,7 @@
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_master_private *master_priv;
-	drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
+	struct intel_ring_buffer *ring = &dev_priv->render_ring;
 
 	/*
 	 * We should never lose context on the ring with modesetting
@@ -185,7 +108,7 @@
 	ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
 	ring->space = ring->head - (ring->tail + 8);
 	if (ring->space < 0)
-		ring->space += ring->Size;
+		ring->space += ring->size;
 
 	if (!dev->primary->master)
 		return;
@@ -205,12 +128,9 @@
 	if (dev->irq_enabled)
 		drm_irq_uninstall(dev);
 
-	if (dev_priv->ring.virtual_start) {
-		drm_core_ioremapfree(&dev_priv->ring.map, dev);
-		dev_priv->ring.virtual_start = NULL;
-		dev_priv->ring.map.handle = NULL;
-		dev_priv->ring.map.size = 0;
-	}
+	intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
+	if (HAS_BSD(dev))
+		intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
 
 	/* Clear the HWS virtual address at teardown */
 	if (I915_NEED_GFX_HWS(dev))
@@ -233,24 +153,24 @@
 	}
 
 	if (init->ring_size != 0) {
-		if (dev_priv->ring.ring_obj != NULL) {
+		if (dev_priv->render_ring.gem_object != NULL) {
 			i915_dma_cleanup(dev);
 			DRM_ERROR("Client tried to initialize ringbuffer in "
 				  "GEM mode\n");
 			return -EINVAL;
 		}
 
-		dev_priv->ring.Size = init->ring_size;
+		dev_priv->render_ring.size = init->ring_size;
 
-		dev_priv->ring.map.offset = init->ring_start;
-		dev_priv->ring.map.size = init->ring_size;
-		dev_priv->ring.map.type = 0;
-		dev_priv->ring.map.flags = 0;
-		dev_priv->ring.map.mtrr = 0;
+		dev_priv->render_ring.map.offset = init->ring_start;
+		dev_priv->render_ring.map.size = init->ring_size;
+		dev_priv->render_ring.map.type = 0;
+		dev_priv->render_ring.map.flags = 0;
+		dev_priv->render_ring.map.mtrr = 0;
 
-		drm_core_ioremap_wc(&dev_priv->ring.map, dev);
+		drm_core_ioremap_wc(&dev_priv->render_ring.map, dev);
 
-		if (dev_priv->ring.map.handle == NULL) {
+		if (dev_priv->render_ring.map.handle == NULL) {
 			i915_dma_cleanup(dev);
 			DRM_ERROR("can not ioremap virtual address for"
 				  " ring buffer\n");
@@ -258,7 +178,7 @@
 		}
 	}
 
-	dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
+	dev_priv->render_ring.virtual_start = dev_priv->render_ring.map.handle;
 
 	dev_priv->cpp = init->cpp;
 	dev_priv->back_offset = init->back_offset;
@@ -278,26 +198,29 @@
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 
+	struct intel_ring_buffer *ring;
 	DRM_DEBUG_DRIVER("%s\n", __func__);
 
-	if (dev_priv->ring.map.handle == NULL) {
+	ring = &dev_priv->render_ring;
+
+	if (ring->map.handle == NULL) {
 		DRM_ERROR("can not ioremap virtual address for"
 			  " ring buffer\n");
 		return -ENOMEM;
 	}
 
 	/* Program Hardware Status Page */
-	if (!dev_priv->hw_status_page) {
+	if (!ring->status_page.page_addr) {
 		DRM_ERROR("Can not find hardware status page\n");
 		return -EINVAL;
 	}
 	DRM_DEBUG_DRIVER("hw status page @ %p\n",
-				dev_priv->hw_status_page);
-
-	if (dev_priv->status_gfx_addr != 0)
-		I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
+				ring->status_page.page_addr);
+	if (ring->status_page.gfx_addr != 0)
+		ring->setup_status_page(dev, ring);
 	else
 		I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
+
 	DRM_DEBUG_DRIVER("Enabled hardware status page\n");
 
 	return 0;
@@ -407,9 +330,8 @@
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	int i;
-	RING_LOCALS;
 
-	if ((dwords+1) * sizeof(int) >= dev_priv->ring.Size - 8)
+	if ((dwords+1) * sizeof(int) >= dev_priv->render_ring.size - 8)
 		return -EINVAL;
 
 	BEGIN_LP_RING((dwords+1)&~1);
@@ -442,9 +364,7 @@
 	      struct drm_clip_rect *boxes,
 	      int i, int DR1, int DR4)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_clip_rect box = boxes[i];
-	RING_LOCALS;
 
 	if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) {
 		DRM_ERROR("Bad box %d,%d..%d,%d\n",
@@ -481,7 +401,6 @@
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
-	RING_LOCALS;
 
 	dev_priv->counter++;
 	if (dev_priv->counter > 0x7FFFFFFFUL)
@@ -535,10 +454,8 @@
 				     drm_i915_batchbuffer_t * batch,
 				     struct drm_clip_rect *cliprects)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
 	int nbox = batch->num_cliprects;
 	int i = 0, count;
-	RING_LOCALS;
 
 	if ((batch->start | batch->used) & 0x7) {
 		DRM_ERROR("alignment");
@@ -587,7 +504,6 @@
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_master_private *master_priv =
 		dev->primary->master->driver_priv;
-	RING_LOCALS;
 
 	if (!master_priv->sarea_priv)
 		return -EINVAL;
@@ -640,7 +556,8 @@
 	drm_i915_private_t *dev_priv = dev->dev_private;
 
 	i915_kernel_lost_context(dev);
-	return i915_wait_ring(dev, dev_priv->ring.Size - 8, __func__);
+	return intel_wait_ring_buffer(dev, &dev_priv->render_ring,
+				      dev_priv->render_ring.size - 8);
 }
 
 static int i915_flush_ioctl(struct drm_device *dev, void *data,
@@ -827,6 +744,9 @@
 		/* depends on GEM */
 		value = dev_priv->has_gem;
 		break;
+	case I915_PARAM_HAS_BSD:
+		value = HAS_BSD(dev);
+		break;
 	default:
 		DRM_DEBUG_DRIVER("Unknown parameter %d\n",
 				 param->param);
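Userspace can probe for the BSD ring before trying to target it. A sketch
using the getparam ioctl (error handling trimmed; the I915_PARAM_HAS_BSD
constant comes with the corresponding libdrm header update):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <drm/i915_drm.h>

    int main(void)
    {
            int fd = open("/dev/dri/card0", O_RDWR);
            int has_bsd = 0;
            struct drm_i915_getparam gp = {
                    .param = I915_PARAM_HAS_BSD,
                    .value = &has_bsd,
            };

            if (fd >= 0 && ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0)
                    printf("BSD ring: %s\n", has_bsd ? "present" : "absent");
            return 0;
    }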
@@ -882,6 +802,7 @@
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	drm_i915_hws_addr_t *hws = data;
+	struct intel_ring_buffer *ring = &dev_priv->render_ring;
 
 	if (!I915_NEED_GFX_HWS(dev))
 		return -EINVAL;
@@ -898,7 +819,7 @@
 
 	DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr);
 
-	dev_priv->status_gfx_addr = hws->addr & (0x1ffff<<12);
+	ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);
 
 	dev_priv->hws_map.offset = dev->agp->base + hws->addr;
 	dev_priv->hws_map.size = 4*1024;
@@ -909,19 +830,19 @@
 	drm_core_ioremap_wc(&dev_priv->hws_map, dev);
 	if (dev_priv->hws_map.handle == NULL) {
 		i915_dma_cleanup(dev);
-		dev_priv->status_gfx_addr = 0;
+		ring->status_page.gfx_addr = 0;
 		DRM_ERROR("can not ioremap virtual address for"
 				" G33 hw status page\n");
 		return -ENOMEM;
 	}
-	dev_priv->hw_status_page = dev_priv->hws_map.handle;
+	ring->status_page.page_addr = dev_priv->hws_map.handle;
+	memset(ring->status_page.page_addr, 0, PAGE_SIZE);
+	I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
 
-	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
-	I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
 	DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
-				dev_priv->status_gfx_addr);
+			 ring->status_page.gfx_addr);
 	DRM_DEBUG_DRIVER("load hws at %p\n",
-				dev_priv->hw_status_page);
+			 ring->status_page.page_addr);
 	return 0;
 }
 
@@ -1539,14 +1460,11 @@
 	master->driver_priv = NULL;
 }
 
-static void i915_get_mem_freq(struct drm_device *dev)
+static void i915_pineview_get_mem_freq(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	u32 tmp;
 
-	if (!IS_PINEVIEW(dev))
-		return;
-
 	tmp = I915_READ(CLKCFG);
 
 	switch (tmp & CLKCFG_FSB_MASK) {
@@ -1575,8 +1493,525 @@
 		dev_priv->mem_freq = 800;
 		break;
 	}
+
+	/* detect pineview DDR3 setting */
+	tmp = I915_READ(CSHRDDR3CTL);
+	dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
 }
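The new is_ddr3 flag exists so the Pineview CxSR code can pick the right
self-refresh latency entry (per the "Add CxSR support on Pineview DDR3"
commit in this series). A sketch of that lookup; the table layout here is an
assumption, the real one lives in intel_display.c:

    struct cxsr_latency {
            int is_desktop;
            int is_ddr3;
            unsigned long fsb_freq;
            unsigned long mem_freq;
            /* ... per-plane/cursor latency fields ... */
    };

    static const struct cxsr_latency *
    lookup_cxsr(const struct cxsr_latency *tab, int n,
                int is_desktop, int is_ddr3, int fsb, int mem)
    {
            int i;

            for (i = 0; i < n; i++)
                    if (tab[i].is_desktop == is_desktop &&
                        tab[i].is_ddr3 == is_ddr3 &&
                        tab[i].fsb_freq == fsb &&
                        tab[i].mem_freq == mem)
                            return &tab[i];
            return NULL; /* unknown combination: leave CxSR disabled */
    }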
 
+static void i915_ironlake_get_mem_freq(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	u16 ddrpll, csipll;
+
+	ddrpll = I915_READ16(DDRMPLL1);
+	csipll = I915_READ16(CSIPLL0);
+
+	switch (ddrpll & 0xff) {
+	case 0xc:
+		dev_priv->mem_freq = 800;
+		break;
+	case 0x10:
+		dev_priv->mem_freq = 1066;
+		break;
+	case 0x14:
+		dev_priv->mem_freq = 1333;
+		break;
+	case 0x18:
+		dev_priv->mem_freq = 1600;
+		break;
+	default:
+		DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
+				 ddrpll & 0xff);
+		dev_priv->mem_freq = 0;
+		break;
+	}
+
+	dev_priv->r_t = dev_priv->mem_freq;
+
+	switch (csipll & 0x3ff) {
+	case 0x00c:
+		dev_priv->fsb_freq = 3200;
+		break;
+	case 0x00e:
+		dev_priv->fsb_freq = 3733;
+		break;
+	case 0x010:
+		dev_priv->fsb_freq = 4266;
+		break;
+	case 0x012:
+		dev_priv->fsb_freq = 4800;
+		break;
+	case 0x014:
+		dev_priv->fsb_freq = 5333;
+		break;
+	case 0x016:
+		dev_priv->fsb_freq = 5866;
+		break;
+	case 0x018:
+		dev_priv->fsb_freq = 6400;
+		break;
+	default:
+		DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
+				 csipll & 0x3ff);
+		dev_priv->fsb_freq = 0;
+		break;
+	}
+
+	if (dev_priv->fsb_freq == 3200) {
+		dev_priv->c_m = 0;
+	} else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
+		dev_priv->c_m = 1;
+	} else {
+		dev_priv->c_m = 2;
+	}
+}
+
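The four known DDRMPLL1 codes step linearly through the DDR speed grades, so
the switch above is equivalent to this closed form (a sanity-check sketch,
not a replacement):

    /* 0x0c/0x10/0x14/0x18 -> 800/1066/1333/1600 MHz */
    static int ddrpll_to_mhz(unsigned int code)
    {
            if (code < 0x0c || code > 0x18 || (code & 3))
                    return 0;               /* unknown encoding */
            return 800 + ((code - 0x0c) / 4) * 800 / 3;
    }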
+struct v_table {
+	u8 vid;
+	unsigned long vd; /* in .1 mil */
+	unsigned long vm; /* in .1 mil */
+	u8 pvid;
+};
+
+static struct v_table v_table[] = {
+	{ 0, 16125, 15000, 0x7f, },
+	{ 1, 16000, 14875, 0x7e, },
+	{ 2, 15875, 14750, 0x7d, },
+	{ 3, 15750, 14625, 0x7c, },
+	{ 4, 15625, 14500, 0x7b, },
+	{ 5, 15500, 14375, 0x7a, },
+	{ 6, 15375, 14250, 0x79, },
+	{ 7, 15250, 14125, 0x78, },
+	{ 8, 15125, 14000, 0x77, },
+	{ 9, 15000, 13875, 0x76, },
+	{ 10, 14875, 13750, 0x75, },
+	{ 11, 14750, 13625, 0x74, },
+	{ 12, 14625, 13500, 0x73, },
+	{ 13, 14500, 13375, 0x72, },
+	{ 14, 14375, 13250, 0x71, },
+	{ 15, 14250, 13125, 0x70, },
+	{ 16, 14125, 13000, 0x6f, },
+	{ 17, 14000, 12875, 0x6e, },
+	{ 18, 13875, 12750, 0x6d, },
+	{ 19, 13750, 12625, 0x6c, },
+	{ 20, 13625, 12500, 0x6b, },
+	{ 21, 13500, 12375, 0x6a, },
+	{ 22, 13375, 12250, 0x69, },
+	{ 23, 13250, 12125, 0x68, },
+	{ 24, 13125, 12000, 0x67, },
+	{ 25, 13000, 11875, 0x66, },
+	{ 26, 12875, 11750, 0x65, },
+	{ 27, 12750, 11625, 0x64, },
+	{ 28, 12625, 11500, 0x63, },
+	{ 29, 12500, 11375, 0x62, },
+	{ 30, 12375, 11250, 0x61, },
+	{ 31, 12250, 11125, 0x60, },
+	{ 32, 12125, 11000, 0x5f, },
+	{ 33, 12000, 10875, 0x5e, },
+	{ 34, 11875, 10750, 0x5d, },
+	{ 35, 11750, 10625, 0x5c, },
+	{ 36, 11625, 10500, 0x5b, },
+	{ 37, 11500, 10375, 0x5a, },
+	{ 38, 11375, 10250, 0x59, },
+	{ 39, 11250, 10125, 0x58, },
+	{ 40, 11125, 10000, 0x57, },
+	{ 41, 11000, 9875, 0x56, },
+	{ 42, 10875, 9750, 0x55, },
+	{ 43, 10750, 9625, 0x54, },
+	{ 44, 10625, 9500, 0x53, },
+	{ 45, 10500, 9375, 0x52, },
+	{ 46, 10375, 9250, 0x51, },
+	{ 47, 10250, 9125, 0x50, },
+	{ 48, 10125, 9000, 0x4f, },
+	{ 49, 10000, 8875, 0x4e, },
+	{ 50, 9875, 8750, 0x4d, },
+	{ 51, 9750, 8625, 0x4c, },
+	{ 52, 9625, 8500, 0x4b, },
+	{ 53, 9500, 8375, 0x4a, },
+	{ 54, 9375, 8250, 0x49, },
+	{ 55, 9250, 8125, 0x48, },
+	{ 56, 9125, 8000, 0x47, },
+	{ 57, 9000, 7875, 0x46, },
+	{ 58, 8875, 7750, 0x45, },
+	{ 59, 8750, 7625, 0x44, },
+	{ 60, 8625, 7500, 0x43, },
+	{ 61, 8500, 7375, 0x42, },
+	{ 62, 8375, 7250, 0x41, },
+	{ 63, 8250, 7125, 0x40, },
+	{ 64, 8125, 7000, 0x3f, },
+	{ 65, 8000, 6875, 0x3e, },
+	{ 66, 7875, 6750, 0x3d, },
+	{ 67, 7750, 6625, 0x3c, },
+	{ 68, 7625, 6500, 0x3b, },
+	{ 69, 7500, 6375, 0x3a, },
+	{ 70, 7375, 6250, 0x39, },
+	{ 71, 7250, 6125, 0x38, },
+	{ 72, 7125, 6000, 0x37, },
+	{ 73, 7000, 5875, 0x36, },
+	{ 74, 6875, 5750, 0x35, },
+	{ 75, 6750, 5625, 0x34, },
+	{ 76, 6625, 5500, 0x33, },
+	{ 77, 6500, 5375, 0x32, },
+	{ 78, 6375, 5250, 0x31, },
+	{ 79, 6250, 5125, 0x30, },
+	{ 80, 6125, 5000, 0x2f, },
+	{ 81, 6000, 4875, 0x2e, },
+	{ 82, 5875, 4750, 0x2d, },
+	{ 83, 5750, 4625, 0x2c, },
+	{ 84, 5625, 4500, 0x2b, },
+	{ 85, 5500, 4375, 0x2a, },
+	{ 86, 5375, 4250, 0x29, },
+	{ 87, 5250, 4125, 0x28, },
+	{ 88, 5125, 4000, 0x27, },
+	{ 89, 5000, 3875, 0x26, },
+	{ 90, 4875, 3750, 0x25, },
+	{ 91, 4750, 3625, 0x24, },
+	{ 92, 4625, 3500, 0x23, },
+	{ 93, 4500, 3375, 0x22, },
+	{ 94, 4375, 3250, 0x21, },
+	{ 95, 4250, 3125, 0x20, },
+	{ 96, 4125, 3000, 0x1f, },
+	{ 97, 4125, 3000, 0x1e, },
+	{ 98, 4125, 3000, 0x1d, },
+	{ 99, 4125, 3000, 0x1c, },
+	{ 100, 4125, 3000, 0x1b, },
+	{ 101, 4125, 3000, 0x1a, },
+	{ 102, 4125, 3000, 0x19, },
+	{ 103, 4125, 3000, 0x18, },
+	{ 104, 4125, 3000, 0x17, },
+	{ 105, 4125, 3000, 0x16, },
+	{ 106, 4125, 3000, 0x15, },
+	{ 107, 4125, 3000, 0x14, },
+	{ 108, 4125, 3000, 0x13, },
+	{ 109, 4125, 3000, 0x12, },
+	{ 110, 4125, 3000, 0x11, },
+	{ 111, 4125, 3000, 0x10, },
+	{ 112, 4125, 3000, 0x0f, },
+	{ 113, 4125, 3000, 0x0e, },
+	{ 114, 4125, 3000, 0x0d, },
+	{ 115, 4125, 3000, 0x0c, },
+	{ 116, 4125, 3000, 0x0b, },
+	{ 117, 4125, 3000, 0x0a, },
+	{ 118, 4125, 3000, 0x09, },
+	{ 119, 4125, 3000, 0x08, },
+	{ 120, 1125, 0, 0x07, },
+	{ 121, 1000, 0, 0x06, },
+	{ 122, 875, 0, 0x05, },
+	{ 123, 750, 0, 0x04, },
+	{ 124, 625, 0, 0x03, },
+	{ 125, 500, 0, 0x02, },
+	{ 126, 375, 0, 0x01, },
+	{ 127, 0, 0, 0x00, },
+};
+
+struct cparams {
+	int i;
+	int t;
+	int m;
+	int c;
+};
+
+static struct cparams cparams[] = {
+	{ 1, 1333, 301, 28664 },
+	{ 1, 1066, 294, 24460 },
+	{ 1, 800, 294, 25192 },
+	{ 0, 1333, 276, 27605 },
+	{ 0, 1066, 276, 27605 },
+	{ 0, 800, 231, 23784 },
+};
+
+unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
+{
+	u64 total_count, diff, ret;
+	u32 count1, count2, count3, m = 0, c = 0;
+	unsigned long now = jiffies_to_msecs(jiffies), diff1;
+	int i;
+
+	diff1 = now - dev_priv->last_time1;
+
+	count1 = I915_READ(DMIEC);
+	count2 = I915_READ(DDREC);
+	count3 = I915_READ(CSIEC);
+
+	total_count = count1 + count2 + count3;
+
+	/* FIXME: handle per-counter overflow */
+	if (total_count < dev_priv->last_count1) {
+		diff = ~0UL - dev_priv->last_count1;
+		diff += total_count;
+	} else {
+		diff = total_count - dev_priv->last_count1;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(cparams); i++) {
+		if (cparams[i].i == dev_priv->c_m &&
+		    cparams[i].t == dev_priv->r_t) {
+			m = cparams[i].m;
+			c = cparams[i].c;
+			break;
+		}
+	}
+
+	diff = div_u64(diff, diff1);
+	ret = ((m * diff) + c);
+	ret = div_u64(ret, 10);
+
+	dev_priv->last_count1 = total_count;
+	dev_priv->last_time1 = now;
+
+	return ret;
+}
+
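In short: sum the three energy counters, turn the delta into a
per-millisecond rate, and run it through the (m, c) line fitted for the
current FSB/memory pairing. A worked sketch (kernel context; whether the
result is in mW is an assumption):

    #include <linux/math64.h>   /* div_u64 */

    /* c_m = 0 (FSB 3200) with r_t = 1333 selects { 0, 1333, 276, 27605 }
     * from cparams; 50000 counter units over 100ms then give: */
    static u64 chipset_power_example(void)
    {
            u64 diff = 50000;
            unsigned long diff1 = 100;        /* ms since last sample */
            u32 m = 276, c = 27605;

            diff = div_u64(diff, diff1);      /* 500 units/ms */
            return div_u64(m * diff + c, 10); /* (276*500+27605)/10 = 16560 */
    }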
+unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
+{
+	unsigned long m, x, b;
+	u32 tsfs;
+
+	tsfs = I915_READ(TSFS);
+
+	m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
+	x = I915_READ8(TR1);
+
+	b = tsfs & TSFS_INTR_MASK;
+
+	return ((m * x) / 127) - b;
+}
+
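i915_mch_val() is a straight line fit over the TR1 thermal reading, with
slope and intercept both packed into TSFS. For example:

    /* Sketch: slope m = 100, TR1 reading x = 80, intercept b = 10
     * yields (100 * 80) / 127 - 10 = 52. */
    static unsigned long mch_val_example(void)
    {
            unsigned long m = 100, x = 80, b = 10;

            return ((m * x) / 127) - b;
    }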
+static unsigned long pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
+{
+	unsigned long val = 0;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(v_table); i++) {
+		if (v_table[i].pvid == pxvid) {
+			if (IS_MOBILE(dev_priv->dev))
+				val = v_table[i].vm;
+			else
+				val = v_table[i].vd;
+		}
+	}
+
+	return val;
+}
+
+void i915_update_gfx_val(struct drm_i915_private *dev_priv)
+{
+	struct timespec now, diff1;
+	u64 diff;
+	unsigned long diffms;
+	u32 count;
+
+	getrawmonotonic(&now);
+	diff1 = timespec_sub(now, dev_priv->last_time2);
+
+	/* Don't divide by 0 */
+	diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000;
+	if (!diffms)
+		return;
+
+	count = I915_READ(GFXEC);
+
+	if (count < dev_priv->last_count2) {
+		diff = ~0UL - dev_priv->last_count2;
+		diff += count;
+	} else {
+		diff = count - dev_priv->last_count2;
+	}
+
+	dev_priv->last_count2 = count;
+	dev_priv->last_time2 = now;
+
+	/* More magic constants... */
+	diff = diff * 1181;
+	diff = div_u64(diff, diffms * 10);
+	dev_priv->gfx_power = diff;
+}
+
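The GFXEC delta is scaled by the magic 1181/10 factor over the elapsed
milliseconds and cached in gfx_power; the early return above keeps diffms
from ever being zero. A worked sketch:

    #include <linux/math64.h>

    /* 20000 GFXEC ticks over 250ms: 20000 * 1181 / (250 * 10) = 9448 */
    static u64 gfx_power_example(void)
    {
            u64 diff = 20000;
            unsigned long diffms = 250;

            diff = diff * 1181;
            return div_u64(diff, diffms * 10);
    }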
+unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
+{
+	unsigned long t, corr, state1, corr2, state2;
+	u32 pxvid, ext_v;
+
+	pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->cur_delay * 4));
+	pxvid = (pxvid >> 24) & 0x7f;
+	ext_v = pvid_to_extvid(dev_priv, pxvid);
+
+	state1 = ext_v;
+
+	t = i915_mch_val(dev_priv);
+
+	/* Revel in the empirically derived constants */
+
+	/* Correction factor in 1/100000 units */
+	if (t > 80)
+		corr = ((t * 2349) + 135940);
+	else if (t >= 50)
+		corr = ((t * 964) + 29317);
+	else /* < 50 */
+		corr = ((t * 301) + 1004);
+
+	corr = corr * ((150142 * state1) / 10000 - 78642);
+	corr /= 100000;
+	corr2 = (corr * dev_priv->corr);
+
+	state2 = (corr2 * state1) / 10000;
+	state2 /= 100; /* convert to mW */
+
+	i915_update_gfx_val(dev_priv);
+
+	return dev_priv->gfx_power + state2;
+}
+
+/* Global for IPS driver to get at the current i915 device */
+static struct drm_i915_private *i915_mch_dev;
+/*
+ * Lock protecting IPS related data structures
+ *   - i915_mch_dev
+ *   - dev_priv->max_delay
+ *   - dev_priv->min_delay
+ *   - dev_priv->fmax
+ *   - dev_priv->gpu_busy
+ */
+DEFINE_SPINLOCK(mchdev_lock);
+
+/**
+ * i915_read_mch_val - return value for IPS use
+ *
+ * Calculate and return a value for the IPS driver to use when deciding whether
+ * we have thermal and power headroom to increase CPU or GPU power budget.
+ */
+unsigned long i915_read_mch_val(void)
+{
+	struct drm_i915_private *dev_priv;
+	unsigned long chipset_val, graphics_val, ret = 0;
+
+	spin_lock(&mchdev_lock);
+	if (!i915_mch_dev)
+		goto out_unlock;
+	dev_priv = i915_mch_dev;
+
+	chipset_val = i915_chipset_val(dev_priv);
+	graphics_val = i915_gfx_val(dev_priv);
+
+	ret = chipset_val + graphics_val;
+
+out_unlock:
+	spin_unlock(&mchdev_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(i915_read_mch_val);
+
+/**
+ * i915_gpu_raise - raise GPU frequency limit
+ *
+ * Raise the limit; IPS indicates we have thermal headroom.
+ */
+bool i915_gpu_raise(void)
+{
+	struct drm_i915_private *dev_priv;
+	bool ret = true;
+
+	spin_lock(&mchdev_lock);
+	if (!i915_mch_dev) {
+		ret = false;
+		goto out_unlock;
+	}
+	dev_priv = i915_mch_dev;
+
+	if (dev_priv->max_delay > dev_priv->fmax)
+		dev_priv->max_delay--;
+
+out_unlock:
+	spin_unlock(&mchdev_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(i915_gpu_raise);
+
+/**
+ * i915_gpu_lower - lower GPU frequency limit
+ *
+ * IPS indicates we're close to a thermal limit, so throttle back the GPU
+ * frequency maximum.
+ */
+bool i915_gpu_lower(void)
+{
+	struct drm_i915_private *dev_priv;
+	bool ret = true;
+
+	spin_lock(&mchdev_lock);
+	if (!i915_mch_dev) {
+		ret = false;
+		goto out_unlock;
+	}
+	dev_priv = i915_mch_dev;
+
+	if (dev_priv->max_delay < dev_priv->min_delay)
+		dev_priv->max_delay++;
+
+out_unlock:
+	spin_unlock(&mchdev_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(i915_gpu_lower);
+
+/**
+ * i915_gpu_busy - indicate GPU activity to IPS
+ *
+ * Tell the IPS driver whether or not the GPU is busy.
+ */
+bool i915_gpu_busy(void)
+{
+	struct drm_i915_private *dev_priv;
+	bool ret = false;
+
+	spin_lock(&mchdev_lock);
+	if (!i915_mch_dev)
+		goto out_unlock;
+	dev_priv = i915_mch_dev;
+
+	ret = dev_priv->busy;
+
+out_unlock:
+	spin_unlock(&mchdev_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(i915_gpu_busy);
+
+/**
+ * i915_gpu_turbo_disable - disable graphics turbo
+ *
+ * Disable graphics turbo by resetting the max frequency and setting the
+ * current frequency to the default.
+ */
+bool i915_gpu_turbo_disable(void)
+{
+	struct drm_i915_private *dev_priv;
+	bool ret = true;
+
+	spin_lock(&mchdev_lock);
+	if (!i915_mch_dev) {
+		ret = false;
+		goto out_unlock;
+	}
+	dev_priv = i915_mch_dev;
+
+	dev_priv->max_delay = dev_priv->fstart;
+
+	if (!ironlake_set_drps(dev_priv->dev, dev_priv->fstart))
+		ret = false;
+
+out_unlock:
+	spin_unlock(&mchdev_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
+
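These five GPL exports are the whole contract with the IPS driver. A sketch
of the consuming side, assuming it resolves the hooks lazily so it can load
with or without i915:

    #include <linux/module.h>

    extern unsigned long i915_read_mch_val(void);

    /* Hypothetical IPS-side sampling; symbol_get() pins i915 while the
     * hook is in use and fails cleanly if i915 isn't loaded. */
    static unsigned long ips_sample_mch(void)
    {
            unsigned long (*read_mch)(void);
            unsigned long val = 0;

            read_mch = symbol_get(i915_read_mch_val);
            if (read_mch) {
                    val = read_mch();
                    symbol_put(i915_read_mch_val);
            }
            return val;
    }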
 /**
  * i915_driver_load - setup chip and create an initial config
  * @dev: DRM device
@@ -1594,7 +2029,6 @@
 	resource_size_t base, size;
 	int ret = 0, mmio_bar;
 	uint32_t agp_size, prealloc_size, prealloc_start;
-
 	/* i915 has 4 more counters */
 	dev->counters += 4;
 	dev->types[6] = _DRM_STAT_IRQ;
@@ -1672,6 +2106,13 @@
 		dev_priv->has_gem = 0;
 	}
 
+	if (dev_priv->has_gem == 0 &&
+	    drm_core_check_feature(dev, DRIVER_MODESET)) {
+		DRM_ERROR("kernel modesetting requires GEM, disabling driver.\n");
+		ret = -ENODEV;
+		goto out_iomapfree;
+	}
+
 	dev->driver->get_vblank_counter = i915_get_vblank_counter;
 	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
 	if (IS_G4X(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) {
@@ -1691,7 +2132,10 @@
 			goto out_workqueue_free;
 	}
 
-	i915_get_mem_freq(dev);
+	if (IS_PINEVIEW(dev))
+		i915_pineview_get_mem_freq(dev);
+	else if (IS_IRONLAKE(dev))
+		i915_ironlake_get_mem_freq(dev);
 
 	/* On the 945G/GM, the chipset reports the MSI capability on the
 	 * integrated graphics even though the support isn't actually there
@@ -1709,7 +2153,6 @@
 
 	spin_lock_init(&dev_priv->user_irq_lock);
 	spin_lock_init(&dev_priv->error_lock);
-	dev_priv->user_irq_refcount = 0;
 	dev_priv->trace_irq_seqno = 0;
 
 	ret = drm_vblank_init(dev, I915_NUM_PIPE);
@@ -1738,6 +2181,12 @@
 
 	setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
 		    (unsigned long) dev);
+
+	spin_lock(&mchdev_lock);
+	i915_mch_dev = dev_priv;
+	dev_priv->mchdev_lock = &mchdev_lock;
+	spin_unlock(&mchdev_lock);
+
 	return 0;
 
 out_workqueue_free:
@@ -1759,6 +2208,10 @@
 
 	i915_destroy_error_state(dev);
 
+	spin_lock(&mchdev_lock);
+	i915_mch_dev = NULL;
+	spin_unlock(&mchdev_lock);
+
 	destroy_workqueue(dev_priv->wq);
 	del_timer_sync(&dev_priv->hangcheck_timer);
 
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 5c51e45..423dc90 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -60,95 +60,95 @@
 	.subdevice = PCI_ANY_ID,		\
 	.driver_data = (unsigned long) info }
 
-const static struct intel_device_info intel_i830_info = {
+static const struct intel_device_info intel_i830_info = {
 	.is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1,
 };
 
-const static struct intel_device_info intel_845g_info = {
+static const struct intel_device_info intel_845g_info = {
 	.is_i8xx = 1,
 };
 
-const static struct intel_device_info intel_i85x_info = {
+static const struct intel_device_info intel_i85x_info = {
 	.is_i8xx = 1, .is_i85x = 1, .is_mobile = 1,
 	.cursor_needs_physical = 1,
 };
 
-const static struct intel_device_info intel_i865g_info = {
+static const struct intel_device_info intel_i865g_info = {
 	.is_i8xx = 1,
 };
 
-const static struct intel_device_info intel_i915g_info = {
+static const struct intel_device_info intel_i915g_info = {
 	.is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1,
 };
-const static struct intel_device_info intel_i915gm_info = {
+static const struct intel_device_info intel_i915gm_info = {
 	.is_i9xx = 1,  .is_mobile = 1,
 	.cursor_needs_physical = 1,
 };
-const static struct intel_device_info intel_i945g_info = {
+static const struct intel_device_info intel_i945g_info = {
 	.is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1,
 };
-const static struct intel_device_info intel_i945gm_info = {
+static const struct intel_device_info intel_i945gm_info = {
 	.is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1,
 	.has_hotplug = 1, .cursor_needs_physical = 1,
 };
 
-const static struct intel_device_info intel_i965g_info = {
+static const struct intel_device_info intel_i965g_info = {
 	.is_i965g = 1, .is_i9xx = 1, .has_hotplug = 1,
 };
 
-const static struct intel_device_info intel_i965gm_info = {
+static const struct intel_device_info intel_i965gm_info = {
 	.is_i965g = 1, .is_mobile = 1, .is_i965gm = 1, .is_i9xx = 1,
 	.is_mobile = 1, .has_fbc = 1, .has_rc6 = 1,
 	.has_hotplug = 1,
 };
 
-const static struct intel_device_info intel_g33_info = {
+static const struct intel_device_info intel_g33_info = {
 	.is_g33 = 1, .is_i9xx = 1, .need_gfx_hws = 1,
 	.has_hotplug = 1,
 };
 
-const static struct intel_device_info intel_g45_info = {
+static const struct intel_device_info intel_g45_info = {
 	.is_i965g = 1, .is_g4x = 1, .is_i9xx = 1, .need_gfx_hws = 1,
 	.has_pipe_cxsr = 1,
 	.has_hotplug = 1,
 };
 
-const static struct intel_device_info intel_gm45_info = {
+static const struct intel_device_info intel_gm45_info = {
 	.is_i965g = 1, .is_mobile = 1, .is_g4x = 1, .is_i9xx = 1,
 	.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1,
 	.has_pipe_cxsr = 1,
 	.has_hotplug = 1,
 };
 
-const static struct intel_device_info intel_pineview_info = {
+static const struct intel_device_info intel_pineview_info = {
 	.is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1,
 	.need_gfx_hws = 1,
 	.has_hotplug = 1,
 };
 
-const static struct intel_device_info intel_ironlake_d_info = {
+static const struct intel_device_info intel_ironlake_d_info = {
 	.is_ironlake = 1, .is_i965g = 1, .is_i9xx = 1, .need_gfx_hws = 1,
 	.has_pipe_cxsr = 1,
 	.has_hotplug = 1,
 };
 
-const static struct intel_device_info intel_ironlake_m_info = {
+static const struct intel_device_info intel_ironlake_m_info = {
 	.is_ironlake = 1, .is_mobile = 1, .is_i965g = 1, .is_i9xx = 1,
 	.need_gfx_hws = 1, .has_rc6 = 1,
 	.has_hotplug = 1,
 };
 
-const static struct intel_device_info intel_sandybridge_d_info = {
+static const struct intel_device_info intel_sandybridge_d_info = {
 	.is_i965g = 1, .is_i9xx = 1, .need_gfx_hws = 1,
 	.has_hotplug = 1, .is_gen6 = 1,
 };
 
-const static struct intel_device_info intel_sandybridge_m_info = {
+static const struct intel_device_info intel_sandybridge_m_info = {
 	.is_i965g = 1, .is_mobile = 1, .is_i9xx = 1, .need_gfx_hws = 1,
 	.has_hotplug = 1, .is_gen6 = 1,
 };
 
-const static struct pci_device_id pciidlist[] = {
+static const struct pci_device_id pciidlist[] = {
 	INTEL_VGA_DEVICE(0x3577, &intel_i830_info),
 	INTEL_VGA_DEVICE(0x2562, &intel_845g_info),
 	INTEL_VGA_DEVICE(0x3582, &intel_i85x_info),
@@ -340,7 +340,7 @@
 	/*
 	 * Clear request list
 	 */
-	i915_gem_retire_requests(dev);
+	i915_gem_retire_requests(dev, &dev_priv->render_ring);
 
 	if (need_display)
 		i915_save_display(dev);
@@ -370,6 +370,7 @@
 		}
 	} else {
 		DRM_ERROR("Error occurred. Don't know how to reset this chip.\n");
+		mutex_unlock(&dev->struct_mutex);
 		return -ENODEV;
 	}
 
@@ -388,33 +389,10 @@
 	 * switched away).
 	 */
 	if (drm_core_check_feature(dev, DRIVER_MODESET) ||
-	    !dev_priv->mm.suspended) {
-		drm_i915_ring_buffer_t *ring = &dev_priv->ring;
-		struct drm_gem_object *obj = ring->ring_obj;
-		struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+			!dev_priv->mm.suspended) {
+		struct intel_ring_buffer *ring = &dev_priv->render_ring;
 		dev_priv->mm.suspended = 0;
-
-		/* Stop the ring if it's running. */
-		I915_WRITE(PRB0_CTL, 0);
-		I915_WRITE(PRB0_TAIL, 0);
-		I915_WRITE(PRB0_HEAD, 0);
-
-		/* Initialize the ring. */
-		I915_WRITE(PRB0_START, obj_priv->gtt_offset);
-		I915_WRITE(PRB0_CTL,
-			   ((obj->size - 4096) & RING_NR_PAGES) |
-			   RING_NO_REPORT |
-			   RING_VALID);
-		if (!drm_core_check_feature(dev, DRIVER_MODESET))
-			i915_kernel_lost_context(dev);
-		else {
-			ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
-			ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
-			ring->space = ring->head - (ring->tail + 8);
-			if (ring->space < 0)
-				ring->space += ring->Size;
-		}
-
+		ring->init(dev, ring);
 		mutex_unlock(&dev->struct_mutex);
 		drm_irq_uninstall(dev);
 		drm_irq_install(dev);
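The reset path now simply asks the ring to reinitialize itself via its init
hook. The intel_ring_buffer abstraction lives in the new intel_ringbuffer.h,
which this diff does not show; a rough sketch of its shape, inferred from the
call sites above (field types and the full ops list are assumptions):

    struct intel_hw_status_page {
            void *page_addr;
            unsigned int gfx_addr;
            struct drm_gem_object *obj;
    };

    struct intel_ring_buffer {
            unsigned long size;
            u8 *virtual_start;
            unsigned int head;
            unsigned int tail;
            int space;
            drm_local_map_t map;
            struct drm_gem_object *gem_object;
            struct intel_hw_status_page status_page;
            struct list_head active_list;
            struct list_head request_list;

            int (*init)(struct drm_device *dev,
                        struct intel_ring_buffer *ring);
            void (*setup_status_page)(struct drm_device *dev,
                                      struct intel_ring_buffer *ring);
            /* ... flush, add_request, get_gem_seqno, dispatch, etc. ... */
    };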
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 7f797ef..9ed8ecd 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -32,6 +32,7 @@
 
 #include "i915_reg.h"
 #include "intel_bios.h"
+#include "intel_ringbuffer.h"
 #include <linux/io-mapping.h>
 
 /* General customization:
@@ -55,6 +56,8 @@
 
 #define I915_NUM_PIPE	2
 
+#define I915_GEM_GPU_DOMAINS	(~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
+
 /* Interface history:
  *
  * 1.1: Original.
@@ -89,16 +92,6 @@
 	struct drm_gem_object *cur_obj;
 };
 
-typedef struct _drm_i915_ring_buffer {
-	unsigned long Size;
-	u8 *virtual_start;
-	int head;
-	int tail;
-	int space;
-	drm_local_map_t map;
-	struct drm_gem_object *ring_obj;
-} drm_i915_ring_buffer_t;
-
 struct mem_block {
 	struct mem_block *next;
 	struct mem_block *prev;
@@ -241,17 +234,15 @@
 	void __iomem *regs;
 
 	struct pci_dev *bridge_dev;
-	drm_i915_ring_buffer_t ring;
+	struct intel_ring_buffer render_ring;
+	struct intel_ring_buffer bsd_ring;
 
 	drm_dma_handle_t *status_page_dmah;
-	void *hw_status_page;
 	void *seqno_page;
 	dma_addr_t dma_status_page;
 	uint32_t counter;
-	unsigned int status_gfx_addr;
 	unsigned int seqno_gfx_addr;
 	drm_local_map_t hws_map;
-	struct drm_gem_object *hws_obj;
 	struct drm_gem_object *seqno_obj;
 	struct drm_gem_object *pwrctx;
 
@@ -267,8 +258,6 @@
 	atomic_t irq_received;
 	/** Protects user_irq_refcount and irq_mask_reg */
 	spinlock_t user_irq_lock;
-	/** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
-	int user_irq_refcount;
 	u32 trace_irq_seqno;
 	/** Cached value of IMR to avoid reads in updating the bitfield */
 	u32 irq_mask_reg;
@@ -334,7 +323,7 @@
 	int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
 	int num_fence_regs; /* 8 on pre-965, 16 otherwise */
 
-	unsigned int fsb_freq, mem_freq;
+	unsigned int fsb_freq, mem_freq, is_ddr3;
 
 	spinlock_t error_lock;
 	struct drm_i915_error_state *first_error;
@@ -514,18 +503,7 @@
 		 */
 		struct list_head shrink_list;
 
-		/**
-		 * List of objects currently involved in rendering from the
-		 * ringbuffer.
-		 *
-		 * Includes buffers having the contents of their GPU caches
-		 * flushed, not necessarily primitives.  last_rendering_seqno
-		 * represents when the rendering involved will be completed.
-		 *
-		 * A reference is held on the buffer while on this list.
-		 */
 		spinlock_t active_list_lock;
-		struct list_head active_list;
 
 		/**
 		 * List of objects which are not in the ringbuffer but which
@@ -563,12 +541,6 @@
 		struct list_head fence_list;
 
 		/**
-		 * List of breadcrumbs associated with GPU requests currently
-		 * outstanding.
-		 */
-		struct list_head request_list;
-
-		/**
 		 * We leave the user IRQ off as much as possible,
 		 * but this means that requests will finish and never
 		 * be retired once the system goes idle. Set a timer to
@@ -644,6 +616,18 @@
 	u8 cur_delay;
 	u8 min_delay;
 	u8 max_delay;
+	u8 fmax;
+	u8 fstart;
+
+	u64 last_count1;
+	unsigned long last_time1;
+	u64 last_count2;
+	struct timespec last_time2;
+	unsigned long gfx_power;
+	int c_m;
+	int r_t;
+	u8 corr;
+	spinlock_t *mchdev_lock;
 
 	enum no_fbc_reason no_fbc_reason;
 
@@ -671,19 +655,64 @@
 	 * (has pending rendering), and is not set if it's on inactive (ready
 	 * to be unbound).
 	 */
-	int active;
+	unsigned int active : 1;
 
 	/**
 	 * This is set if the object has been written to since last bound
 	 * to the GTT
 	 */
-	int dirty;
+	unsigned int dirty : 1;
+
+	/**
+	 * Fence register bits (if any) for this object.  Will be set
+	 * as needed when mapped into the GTT.
+	 * Protected by dev->struct_mutex.
+	 *
+	 * Size: 4 bits for 16 fences + sign (for FENCE_REG_NONE)
+	 */
+	int fence_reg : 5;
+
+	/**
+	 * Used for checking the object doesn't appear more than once
+	 * in an execbuffer object list.
+	 */
+	unsigned int in_execbuffer : 1;
+
+	/**
+	 * Advice: are the backing pages purgeable?
+	 */
+	unsigned int madv : 2;
+
+	/**
+	 * Refcount for the pages array. With the current locking scheme, there
+	 * are at most two concurrent users: Binding a bo to the gtt and
+	 * pwrite/pread using physical addresses. So two bits for a maximum
+	 * of two users are enough.
+	 */
+	unsigned int pages_refcount : 2;
+#define DRM_I915_GEM_OBJECT_MAX_PAGES_REFCOUNT 0x3
+
+	/**
+	 * Current tiling mode for the object.
+	 */
+	unsigned int tiling_mode : 2;
+
+	/** How many users have pinned this object in GTT space. The following
+	 * users can each hold at most one reference: pwrite/pread, pin_ioctl
+	 * (via user_pin_count), execbuffer (objects are not allowed multiple
+	 * times for the same batchbuffer), and the framebuffer code. When
+	 * switching/pageflipping, the framebuffer code has at most two buffers
+	 * pinned per crtc.
+	 *
+	 * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
+	 * bits with absolutely no headroom. So use 4 bits. */
+	int pin_count : 4;
+#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
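Packing note: fence_reg is a signed 5-bit field precisely so that
FENCE_REG_NONE (-1) fits alongside register indices 0..15, and the signed
4-bit pin_count covers the worst-case count of 7 derived above. A quick
user-space sketch of the ranges (gcc treats plain int bitfields as signed,
matching the driver's usage):

    #include <stdio.h>

    struct packed_flags {
            unsigned int active : 1;
            unsigned int dirty : 1;
            int fence_reg : 5;      /* -16..15, room for FENCE_REG_NONE */
            unsigned int madv : 2;
            unsigned int tiling_mode : 2;
            int pin_count : 4;      /* -8..7; worst case is 7 */
    };

    int main(void)
    {
            struct packed_flags f = { .fence_reg = -1, .pin_count = 7 };

            printf("fence_reg=%d pin_count=%d sizeof=%zu\n",
                   f.fence_reg, f.pin_count, sizeof(f));
            return 0;
    }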
 
 	/** AGP memory structure for our GTT binding. */
 	DRM_AGP_MEM *agp_mem;
 
 	struct page **pages;
-	int pages_refcount;
 
 	/**
 	 * Current offset of the object in GTT space.
@@ -692,26 +721,18 @@
 	 */
 	uint32_t gtt_offset;
 
+	/* Which ring is referring to this object */
+	struct intel_ring_buffer *ring;
+
 	/**
 	 * Fake offset for use by mmap(2)
 	 */
 	uint64_t mmap_offset;
 
-	/**
-	 * Fence register bits (if any) for this object.  Will be set
-	 * as needed when mapped into the GTT.
-	 * Protected by dev->struct_mutex.
-	 */
-	int fence_reg;
-
-	/** How many users have pinned this object in GTT space */
-	int pin_count;
-
 	/** Breadcrumb of last rendering to the buffer. */
 	uint32_t last_rendering_seqno;
 
-	/** Current tiling mode for the object. */
-	uint32_t tiling_mode;
+	/** Current tiling stride for the object, if it's tiled. */
 	uint32_t stride;
 
 	/** Record of address bit 17 of each page at last unbind. */
@@ -734,17 +755,6 @@
 	struct drm_i915_gem_phys_object *phys_obj;
 
 	/**
-	 * Used for checking the object doesn't appear more than once
-	 * in an execbuffer object list.
-	 */
-	int in_execbuffer;
-
-	/**
-	 * Advice: are the backing pages purgeable?
-	 */
-	int madv;
-
-	/**
 	 * Number of crtcs where this object is currently the fb, but
 	 * will be page flipped away on the next vblank.  When it
 	 * reaches 0, dev_priv->pending_flip_queue will be woken up.
@@ -765,6 +775,9 @@
  * an emission time with seqnos for tracking how far ahead of the GPU we are.
  */
 struct drm_i915_gem_request {
+	/** On which ring this request was generated */
+	struct intel_ring_buffer *ring;
+
 	/** GEM sequence number associated with this request. */
 	uint32_t seqno;
 
@@ -821,6 +834,11 @@
 			 struct drm_clip_rect *boxes,
 			 int i, int DR1, int DR4);
 extern int i965_reset(struct drm_device *dev, u8 flags);
+extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
+extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
+extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
+extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
+
 
 /* i915_irq.c */
 void i915_hangcheck_elapsed(unsigned long data);
@@ -829,9 +847,7 @@
 			 struct drm_file *file_priv);
 extern int i915_irq_wait(struct drm_device *dev, void *data,
 			 struct drm_file *file_priv);
-void i915_user_irq_get(struct drm_device *dev);
 void i915_trace_irq_get(struct drm_device *dev, u32 seqno);
-void i915_user_irq_put(struct drm_device *dev);
 extern void i915_enable_interrupt (struct drm_device *dev);
 
 extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
@@ -849,6 +865,11 @@
 extern int i915_vblank_swap(struct drm_device *dev, void *data,
 			    struct drm_file *file_priv);
 extern void i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask);
+extern void i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask);
+extern void ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv,
+		u32 mask);
+extern void ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv,
+		u32 mask);
 
 void
 i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
@@ -922,11 +943,13 @@
 int i915_gem_object_unbind(struct drm_gem_object *obj);
 void i915_gem_release_mmap(struct drm_gem_object *obj);
 void i915_gem_lastclose(struct drm_device *dev);
-uint32_t i915_get_gem_seqno(struct drm_device *dev);
+uint32_t i915_get_gem_seqno(struct drm_device *dev,
+		struct intel_ring_buffer *ring);
 bool i915_seqno_passed(uint32_t seq1, uint32_t seq2);
 int i915_gem_object_get_fence_reg(struct drm_gem_object *obj);
 int i915_gem_object_put_fence_reg(struct drm_gem_object *obj);
-void i915_gem_retire_requests(struct drm_device *dev);
+void i915_gem_retire_requests(struct drm_device *dev,
+		 struct intel_ring_buffer *ring);
 void i915_gem_retire_work_handler(struct work_struct *work);
 void i915_gem_clflush_object(struct drm_gem_object *obj);
 int i915_gem_object_set_domain(struct drm_gem_object *obj,
@@ -937,9 +960,13 @@
 int i915_gem_do_init(struct drm_device *dev, unsigned long start,
 		     unsigned long end);
 int i915_gem_idle(struct drm_device *dev);
-uint32_t i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
-			  uint32_t flush_domains);
-int i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible);
+uint32_t i915_add_request(struct drm_device *dev,
+		struct drm_file *file_priv,
+		uint32_t flush_domains,
+		struct intel_ring_buffer *ring);
+int i915_do_wait_request(struct drm_device *dev,
+		uint32_t seqno, int interruptible,
+		struct intel_ring_buffer *ring);
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
 				      int write);
@@ -1015,7 +1042,7 @@
 extern void intel_disable_fbc(struct drm_device *dev);
 extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval);
 extern bool intel_fbc_enabled(struct drm_device *dev);
-
+extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
 extern void intel_detect_pch (struct drm_device *dev);
 extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
 
@@ -1026,7 +1053,8 @@
  * has access to the ring.
  */
 #define RING_LOCK_TEST_WITH_RETURN(dev, file_priv) do {			\
-	if (((drm_i915_private_t *)dev->dev_private)->ring.ring_obj == NULL) \
+	if (((drm_i915_private_t *)dev->dev_private)->render_ring.gem_object \
+			== NULL)					\
 		LOCK_TEST_WITH_RETURN(dev, file_priv);			\
 } while (0)
 
@@ -1039,35 +1067,31 @@
 #define I915_WRITE64(reg, val)	writeq(val, dev_priv->regs + (reg))
 #define I915_READ64(reg)	readq(dev_priv->regs + (reg))
 #define POSTING_READ(reg)	(void)I915_READ(reg)
+#define POSTING_READ16(reg)	(void)I915_READ16(reg)
 
 #define I915_VERBOSE 0
 
-#define RING_LOCALS	volatile unsigned int *ring_virt__;
-
-#define BEGIN_LP_RING(n) do {						\
-	int bytes__ = 4*(n);						\
-	if (I915_VERBOSE) DRM_DEBUG("BEGIN_LP_RING(%d)\n", (n));	\
-	/* a wrap must occur between instructions so pad beforehand */	\
-	if (unlikely (dev_priv->ring.tail + bytes__ > dev_priv->ring.Size)) \
-		i915_wrap_ring(dev);					\
-	if (unlikely (dev_priv->ring.space < bytes__))			\
-		i915_wait_ring(dev, bytes__, __func__);			\
-	ring_virt__ = (unsigned int *)					\
-	        (dev_priv->ring.virtual_start + dev_priv->ring.tail);	\
-	dev_priv->ring.tail += bytes__;					\
-	dev_priv->ring.tail &= dev_priv->ring.Size - 1;			\
-	dev_priv->ring.space -= bytes__;				\
+#define BEGIN_LP_RING(n)  do { \
+	drm_i915_private_t *dev_priv = dev->dev_private;                \
+	if (I915_VERBOSE)						\
+		DRM_DEBUG("   BEGIN_LP_RING %x\n", (int)(n));		\
+	intel_ring_begin(dev, &dev_priv->render_ring, 4*(n));		\
 } while (0)
 
-#define OUT_RING(n) do {						\
-	if (I915_VERBOSE) DRM_DEBUG("   OUT_RING %x\n", (int)(n));	\
-	*ring_virt__++ = (n);						\
+
+#define OUT_RING(x) do {						\
+	drm_i915_private_t *dev_priv = dev->dev_private;		\
+	if (I915_VERBOSE)						\
+		DRM_DEBUG("   OUT_RING %x\n", (int)(x));		\
+	intel_ring_emit(dev, &dev_priv->render_ring, x);		\
 } while (0)
 
 #define ADVANCE_LP_RING() do {						\
+	drm_i915_private_t *dev_priv = dev->dev_private;                \
 	if (I915_VERBOSE)						\
-		DRM_DEBUG("ADVANCE_LP_RING %x\n", dev_priv->ring.tail);	\
-	I915_WRITE(PRB0_TAIL, dev_priv->ring.tail);			\
+		DRM_DEBUG("ADVANCE_LP_RING %x\n",			\
+				dev_priv->render_ring.tail);		\
+	intel_ring_advance(dev, &dev_priv->render_ring);		\
 } while(0)
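The macros keep their old call shape while routing through the new
intel_ring_* helpers, so existing emitters compile unchanged. A typical
sequence (mirroring the breadcrumb emission in i915_dma.c):

    BEGIN_LP_RING(4);
    OUT_RING(MI_STORE_DWORD_INDEX);
    OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
    OUT_RING(dev_priv->counter);
    OUT_RING(0);
    ADVANCE_LP_RING();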
 
 /**
@@ -1085,14 +1109,12 @@
  *
  * The area from dword 0x20 to 0x3ff is available for driver usage.
  */
-#define READ_HWSP(dev_priv, reg)  (((volatile u32*)(dev_priv->hw_status_page))[reg])
+#define READ_HWSP(dev_priv, reg)  (((volatile u32 *)\
+			(dev_priv->render_ring.status_page.page_addr))[reg])
 #define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
 #define I915_GEM_HWS_INDEX		0x20
 #define I915_BREADCRUMB_INDEX		0x21
 
-extern int i915_wrap_ring(struct drm_device * dev);
-extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
-
 #define INTEL_INFO(dev)	(((struct drm_i915_private *) (dev)->dev_private)->info)
 
 #define IS_I830(dev)		((dev)->pci_device == 0x3577)
@@ -1138,6 +1160,7 @@
 			 (dev)->pci_device == 0x2A42 ||		\
 			 (dev)->pci_device == 0x2E42)
 
+#define HAS_BSD(dev)            (IS_IRONLAKE(dev) || IS_G4X(dev))
 #define I915_NEED_GFX_HWS(dev)	(INTEL_INFO(dev)->need_gfx_hws)
 
 /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 112699f..9ded3da 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -35,8 +35,6 @@
 #include <linux/swap.h>
 #include <linux/pci.h>
 
-#define I915_GEM_GPU_DOMAINS	(~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
-
 static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
 static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
 static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
@@ -169,7 +167,7 @@
 		obj_priv->tiling_mode != I915_TILING_NONE;
 }
 
-static inline int
+static inline void
 slow_shmem_copy(struct page *dst_page,
 		int dst_offset,
 		struct page *src_page,
@@ -178,25 +176,16 @@
 {
 	char *dst_vaddr, *src_vaddr;
 
-	dst_vaddr = kmap_atomic(dst_page, KM_USER0);
-	if (dst_vaddr == NULL)
-		return -ENOMEM;
-
-	src_vaddr = kmap_atomic(src_page, KM_USER1);
-	if (src_vaddr == NULL) {
-		kunmap_atomic(dst_vaddr, KM_USER0);
-		return -ENOMEM;
-	}
+	dst_vaddr = kmap(dst_page);
+	src_vaddr = kmap(src_page);
 
 	memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);
 
-	kunmap_atomic(src_vaddr, KM_USER1);
-	kunmap_atomic(dst_vaddr, KM_USER0);
-
-	return 0;
+	kunmap(src_page);
+	kunmap(dst_page);
 }
 
-static inline int
+static inline void
 slow_shmem_bit17_copy(struct page *gpu_page,
 		      int gpu_offset,
 		      struct page *cpu_page,
@@ -216,15 +205,8 @@
 					       cpu_page, cpu_offset, length);
 	}
 
-	gpu_vaddr = kmap_atomic(gpu_page, KM_USER0);
-	if (gpu_vaddr == NULL)
-		return -ENOMEM;
-
-	cpu_vaddr = kmap_atomic(cpu_page, KM_USER1);
-	if (cpu_vaddr == NULL) {
-		kunmap_atomic(gpu_vaddr, KM_USER0);
-		return -ENOMEM;
-	}
+	gpu_vaddr = kmap(gpu_page);
+	cpu_vaddr = kmap(cpu_page);
 
 	/* Copy the data, XORing A6 with A17 (1). The user already knows he's
 	 * XORing with the other bits (A9 for Y, A9 and A10 for X)
@@ -248,10 +230,8 @@
 		length -= this_length;
 	}
 
-	kunmap_atomic(cpu_vaddr, KM_USER1);
-	kunmap_atomic(gpu_vaddr, KM_USER0);
-
-	return 0;
+	kunmap(cpu_page);
+	kunmap(gpu_page);
 }
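The XOR in the loop above is the bit-17 swizzle: when bit 17 of the backing
page's physical address is set, the hardware swaps adjacent 64-byte cache
lines, so the copy flips bit 6 of the offset (pages with bit 17 clear were
already diverted to the plain copy). As a sketch:

    /* page_phys is the physical address of the page backing the
     * tiled object. */
    static inline int bit17_swizzle(int offset, unsigned long page_phys)
    {
            if (page_phys & (1UL << 17))
                    offset ^= 64;   /* swap adjacent cache lines */
            return offset;
    }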
 
 /**
@@ -427,21 +407,19 @@
 			page_length = PAGE_SIZE - data_page_offset;
 
 		if (do_bit17_swizzling) {
-			ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
-						    shmem_page_offset,
-						    user_pages[data_page_index],
-						    data_page_offset,
-						    page_length,
-						    1);
-		} else {
-			ret = slow_shmem_copy(user_pages[data_page_index],
-					      data_page_offset,
-					      obj_priv->pages[shmem_page_index],
+			slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
 					      shmem_page_offset,
-					      page_length);
+					      user_pages[data_page_index],
+					      data_page_offset,
+					      page_length,
+					      1);
+		} else {
+			slow_shmem_copy(user_pages[data_page_index],
+					data_page_offset,
+					obj_priv->pages[shmem_page_index],
+					shmem_page_offset,
+					page_length);
 		}
-		if (ret)
-			goto fail_put_pages;
 
 		remain -= page_length;
 		data_ptr += page_length;
@@ -531,25 +509,24 @@
  * page faults
  */
 
-static inline int
+static inline void
 slow_kernel_write(struct io_mapping *mapping,
 		  loff_t gtt_base, int gtt_offset,
 		  struct page *user_page, int user_offset,
 		  int length)
 {
-	char *src_vaddr, *dst_vaddr;
-	unsigned long unwritten;
+	char __iomem *dst_vaddr;
+	char *src_vaddr;
 
-	dst_vaddr = io_mapping_map_atomic_wc(mapping, gtt_base);
-	src_vaddr = kmap_atomic(user_page, KM_USER1);
-	unwritten = __copy_from_user_inatomic_nocache(dst_vaddr + gtt_offset,
-						      src_vaddr + user_offset,
-						      length);
-	kunmap_atomic(src_vaddr, KM_USER1);
-	io_mapping_unmap_atomic(dst_vaddr);
-	if (unwritten)
-		return -EFAULT;
-	return 0;
+	dst_vaddr = io_mapping_map_wc(mapping, gtt_base);
+	src_vaddr = kmap(user_page);
+
+	memcpy_toio(dst_vaddr + gtt_offset,
+		    src_vaddr + user_offset,
+		    length);
+
+	kunmap(user_page);
+	io_mapping_unmap(dst_vaddr);
 }
 
 static inline int
@@ -722,18 +699,11 @@
 		if ((data_page_offset + page_length) > PAGE_SIZE)
 			page_length = PAGE_SIZE - data_page_offset;
 
-		ret = slow_kernel_write(dev_priv->mm.gtt_mapping,
-					gtt_page_base, gtt_page_offset,
-					user_pages[data_page_index],
-					data_page_offset,
-					page_length);
-
-		/* If we get a fault while copying data, then (presumably) our
-		 * source page isn't available.  Return the error and we'll
-		 * retry in the slow path.
-		 */
-		if (ret)
-			goto out_unpin_object;
+		slow_kernel_write(dev_priv->mm.gtt_mapping,
+				  gtt_page_base, gtt_page_offset,
+				  user_pages[data_page_index],
+				  data_page_offset,
+				  page_length);
 
 		remain -= page_length;
 		offset += page_length;
@@ -902,21 +872,19 @@
 			page_length = PAGE_SIZE - data_page_offset;
 
 		if (do_bit17_swizzling) {
-			ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
-						    shmem_page_offset,
-						    user_pages[data_page_index],
-						    data_page_offset,
-						    page_length,
-						    0);
-		} else {
-			ret = slow_shmem_copy(obj_priv->pages[shmem_page_index],
+			slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
 					      shmem_page_offset,
 					      user_pages[data_page_index],
 					      data_page_offset,
-					      page_length);
+					      page_length,
+					      0);
+		} else {
+			slow_shmem_copy(obj_priv->pages[shmem_page_index],
+					shmem_page_offset,
+					user_pages[data_page_index],
+					data_page_offset,
+					page_length);
 		}
-		if (ret)
-			goto fail_put_pages;
 
 		remain -= page_length;
 		data_ptr += page_length;
@@ -973,7 +941,8 @@
 	if (obj_priv->phys_obj)
 		ret = i915_gem_phys_pwrite(dev, obj, args, file_priv);
 	else if (obj_priv->tiling_mode == I915_TILING_NONE &&
-		 dev->gtt_total != 0) {
+		 dev->gtt_total != 0 &&
+		 obj->write_domain != I915_GEM_DOMAIN_CPU) {
 		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv);
 		if (ret == -EFAULT) {
 			ret = i915_gem_gtt_pwrite_slow(dev, obj, args,
@@ -1484,11 +1453,14 @@
 }
 
 static void
-i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
+i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno,
+			       struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = obj->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
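+	/* Track the ring this object last executed on, so that waits
+	 * and retirement target the right engine.
+	 */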
+	BUG_ON(ring == NULL);
+	obj_priv->ring = ring;
 
 	/* Add a reference if we're newly entering the active list. */
 	if (!obj_priv->active) {
@@ -1497,8 +1469,7 @@
 	}
 	/* Move from whatever list we were on to the tail of execution. */
 	spin_lock(&dev_priv->mm.active_list_lock);
-	list_move_tail(&obj_priv->list,
-		       &dev_priv->mm.active_list);
+	list_move_tail(&obj_priv->list, &ring->active_list);
 	spin_unlock(&dev_priv->mm.active_list_lock);
 	obj_priv->last_rendering_seqno = seqno;
 }
@@ -1551,6 +1522,7 @@
 	BUG_ON(!list_empty(&obj_priv->gpu_write_list));
 
 	obj_priv->last_rendering_seqno = 0;
+	obj_priv->ring = NULL;
 	if (obj_priv->active) {
 		obj_priv->active = 0;
 		drm_gem_object_unreference(obj);
@@ -1560,7 +1532,8 @@
 
 static void
 i915_gem_process_flushing_list(struct drm_device *dev,
-			       uint32_t flush_domains, uint32_t seqno)
+			       uint32_t flush_domains, uint32_t seqno,
+			       struct intel_ring_buffer *ring)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj_priv, *next;
@@ -1571,12 +1544,13 @@
 		struct drm_gem_object *obj = &obj_priv->base;
 
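+		/* Only process objects whose last write was emitted on
+		 * this ring; other rings retire their own flushes.
+		 */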
 		if ((obj->write_domain & flush_domains) ==
-		    obj->write_domain) {
+		    obj->write_domain &&
+		    obj_priv->ring->ring_flag == ring->ring_flag) {
 			uint32_t old_write_domain = obj->write_domain;
 
 			obj->write_domain = 0;
 			list_del_init(&obj_priv->gpu_write_list);
-			i915_gem_object_move_to_active(obj, seqno);
+			i915_gem_object_move_to_active(obj, seqno, ring);
 
 			/* update the fence lru list */
 			if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
@@ -1593,31 +1567,15 @@
 	}
 }
 
-#define PIPE_CONTROL_FLUSH(addr)					\
-	OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |		\
-		 PIPE_CONTROL_DEPTH_STALL);				\
-	OUT_RING(addr | PIPE_CONTROL_GLOBAL_GTT);			\
-	OUT_RING(0);							\
-	OUT_RING(0);							\
-
-/**
- * Creates a new sequence number, emitting a write of it to the status page
- * plus an interrupt, which will trigger i915_user_interrupt_handler.
- *
- * Must be called with struct_lock held.
- *
- * Returned sequence numbers are nonzero on success.
- */
 uint32_t
 i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
-		 uint32_t flush_domains)
+		 uint32_t flush_domains, struct intel_ring_buffer *ring)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_file_private *i915_file_priv = NULL;
 	struct drm_i915_gem_request *request;
 	uint32_t seqno;
 	int was_empty;
-	RING_LOCALS;
 
 	if (file_priv != NULL)
 		i915_file_priv = file_priv->driver_priv;
@@ -1626,62 +1584,14 @@
 	if (request == NULL)
 		return 0;
 
-	/* Grab the seqno we're going to make this request be, and bump the
-	 * next (skipping 0 so it can be the reserved no-seqno value).
-	 */
-	seqno = dev_priv->mm.next_gem_seqno;
-	dev_priv->mm.next_gem_seqno++;
-	if (dev_priv->mm.next_gem_seqno == 0)
-		dev_priv->mm.next_gem_seqno++;
-
-	if (HAS_PIPE_CONTROL(dev)) {
-		u32 scratch_addr = dev_priv->seqno_gfx_addr + 128;
-
-		/*
-		 * Workaround qword write incoherence by flushing the
-		 * PIPE_NOTIFY buffers out to memory before requesting
-		 * an interrupt.
-		 */
-		BEGIN_LP_RING(32);
-		OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
-			 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
-		OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
-		OUT_RING(seqno);
-		OUT_RING(0);
-		PIPE_CONTROL_FLUSH(scratch_addr);
-		scratch_addr += 128; /* write to separate cachelines */
-		PIPE_CONTROL_FLUSH(scratch_addr);
-		scratch_addr += 128;
-		PIPE_CONTROL_FLUSH(scratch_addr);
-		scratch_addr += 128;
-		PIPE_CONTROL_FLUSH(scratch_addr);
-		scratch_addr += 128;
-		PIPE_CONTROL_FLUSH(scratch_addr);
-		scratch_addr += 128;
-		PIPE_CONTROL_FLUSH(scratch_addr);
-		OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
-			 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
-			 PIPE_CONTROL_NOTIFY);
-		OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
-		OUT_RING(seqno);
-		OUT_RING(0);
-		ADVANCE_LP_RING();
-	} else {
-		BEGIN_LP_RING(4);
-		OUT_RING(MI_STORE_DWORD_INDEX);
-		OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-		OUT_RING(seqno);
-
-		OUT_RING(MI_USER_INTERRUPT);
-		ADVANCE_LP_RING();
-	}
-
-	DRM_DEBUG_DRIVER("%d\n", seqno);
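+	/* The ring's add_request hook emits the seqno write and user
+	 * interrupt: the PIPE_CONTROL sequence removed above on chips
+	 * that have it, MI_STORE_DWORD_INDEX plus MI_USER_INTERRUPT
+	 * otherwise.
+	 */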
+	seqno = ring->add_request(dev, ring, file_priv, flush_domains);
 
 	request->seqno = seqno;
+	request->ring = ring;
 	request->emitted_jiffies = jiffies;
-	was_empty = list_empty(&dev_priv->mm.request_list);
-	list_add_tail(&request->list, &dev_priv->mm.request_list);
+	was_empty = list_empty(&ring->request_list);
+	list_add_tail(&request->list, &ring->request_list);
+
 	if (i915_file_priv) {
 		list_add_tail(&request->client_list,
 			      &i915_file_priv->mm.request_list);
@@ -1693,7 +1603,7 @@
 	 * domain we're flushing with our flush.
 	 */
 	if (flush_domains != 0) 
-		i915_gem_process_flushing_list(dev, flush_domains, seqno);
+		i915_gem_process_flushing_list(dev, flush_domains, seqno, ring);
 
 	if (!dev_priv->mm.suspended) {
 		mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
@@ -1710,20 +1620,16 @@
  * before signalling the CPU
  */
 static uint32_t
-i915_retire_commands(struct drm_device *dev)
+i915_retire_commands(struct drm_device *dev, struct intel_ring_buffer *ring)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
 	uint32_t flush_domains = 0;
-	RING_LOCALS;
 
 	/* The sampler always gets flushed on i965 (sigh) */
 	if (IS_I965G(dev))
 		flush_domains |= I915_GEM_DOMAIN_SAMPLER;
-	BEGIN_LP_RING(2);
-	OUT_RING(cmd);
-	OUT_RING(0); /* noop */
-	ADVANCE_LP_RING();
+
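+	/* The ring's flush hook emits the MI_FLUSH formerly open-coded here. */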
+	ring->flush(dev, ring,
+			I915_GEM_DOMAIN_COMMAND, flush_domains);
 	return flush_domains;
 }
 
@@ -1743,11 +1649,11 @@
 	 * by the ringbuffer to the flushing/inactive lists as appropriate.
 	 */
 	spin_lock(&dev_priv->mm.active_list_lock);
-	while (!list_empty(&dev_priv->mm.active_list)) {
+	while (!list_empty(&request->ring->active_list)) {
 		struct drm_gem_object *obj;
 		struct drm_i915_gem_object *obj_priv;
 
-		obj_priv = list_first_entry(&dev_priv->mm.active_list,
+		obj_priv = list_first_entry(&request->ring->active_list,
 					    struct drm_i915_gem_object,
 					    list);
 		obj = &obj_priv->base;
@@ -1794,35 +1700,33 @@
 }
 
 uint32_t
-i915_get_gem_seqno(struct drm_device *dev)
+i915_get_gem_seqno(struct drm_device *dev,
+		   struct intel_ring_buffer *ring)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
-
-	if (HAS_PIPE_CONTROL(dev))
-		return ((volatile u32 *)(dev_priv->seqno_page))[0];
-	else
-		return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
+	return ring->get_gem_seqno(dev, ring);
 }
 
 /**
  * This function clears the request list as sequence numbers are passed.
  */
 void
-i915_gem_retire_requests(struct drm_device *dev)
+i915_gem_retire_requests(struct drm_device *dev,
+		struct intel_ring_buffer *ring)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	uint32_t seqno;
 
-	if (!dev_priv->hw_status_page || list_empty(&dev_priv->mm.request_list))
+	if (!ring->status_page.page_addr
+			|| list_empty(&ring->request_list))
 		return;
 
-	seqno = i915_get_gem_seqno(dev);
+	seqno = i915_get_gem_seqno(dev, ring);
 
-	while (!list_empty(&dev_priv->mm.request_list)) {
+	while (!list_empty(&ring->request_list)) {
 		struct drm_i915_gem_request *request;
 		uint32_t retiring_seqno;
 
-		request = list_first_entry(&dev_priv->mm.request_list,
+		request = list_first_entry(&ring->request_list,
 					   struct drm_i915_gem_request,
 					   list);
 		retiring_seqno = request->seqno;
@@ -1840,7 +1744,8 @@
 
 	if (unlikely (dev_priv->trace_irq_seqno &&
 		      i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
-		i915_user_irq_put(dev);
+
+		ring->user_irq_put(dev, ring);
 		dev_priv->trace_irq_seqno = 0;
 	}
 }
@@ -1856,15 +1761,22 @@
 	dev = dev_priv->dev;
 
 	mutex_lock(&dev->struct_mutex);
-	i915_gem_retire_requests(dev);
+	i915_gem_retire_requests(dev, &dev_priv->render_ring);
+
+	if (HAS_BSD(dev))
+		i915_gem_retire_requests(dev, &dev_priv->bsd_ring);
+
 	if (!dev_priv->mm.suspended &&
-	    !list_empty(&dev_priv->mm.request_list))
+		(!list_empty(&dev_priv->render_ring.request_list) ||
+			(HAS_BSD(dev) &&
+			 !list_empty(&dev_priv->bsd_ring.request_list))))
 		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
 	mutex_unlock(&dev->struct_mutex);
 }
 
 int
-i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
+i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
+		int interruptible, struct intel_ring_buffer *ring)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	u32 ier;
@@ -1875,7 +1787,7 @@
 	if (atomic_read(&dev_priv->mm.wedged))
 		return -EIO;
 
-	if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
+	if (!i915_seqno_passed(ring->get_gem_seqno(dev, ring), seqno)) {
 		if (HAS_PCH_SPLIT(dev))
 			ier = I915_READ(DEIER) | I915_READ(GTIER);
 		else
@@ -1889,19 +1801,21 @@
 
 		trace_i915_gem_request_wait_begin(dev, seqno);
 
-		dev_priv->mm.waiting_gem_seqno = seqno;
-		i915_user_irq_get(dev);
+		ring->waiting_gem_seqno = seqno;
+		ring->user_irq_get(dev, ring);
 		if (interruptible)
-			ret = wait_event_interruptible(dev_priv->irq_queue,
-				i915_seqno_passed(i915_get_gem_seqno(dev), seqno) ||
-				atomic_read(&dev_priv->mm.wedged));
+			ret = wait_event_interruptible(ring->irq_queue,
+				i915_seqno_passed(
+					ring->get_gem_seqno(dev, ring), seqno)
+				|| atomic_read(&dev_priv->mm.wedged));
 		else
-			wait_event(dev_priv->irq_queue,
-				i915_seqno_passed(i915_get_gem_seqno(dev), seqno) ||
-				atomic_read(&dev_priv->mm.wedged));
+			wait_event(ring->irq_queue,
+				i915_seqno_passed(
+					ring->get_gem_seqno(dev, ring), seqno)
+				|| atomic_read(&dev_priv->mm.wedged));
 
-		i915_user_irq_put(dev);
-		dev_priv->mm.waiting_gem_seqno = 0;
+		ring->user_irq_put(dev, ring);
+		ring->waiting_gem_seqno = 0;
 
 		trace_i915_gem_request_wait_end(dev, seqno);
 	}
@@ -1910,7 +1824,7 @@
 
 	if (ret && ret != -ERESTARTSYS)
 		DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
-			  __func__, ret, seqno, i915_get_gem_seqno(dev));
+			  __func__, ret, seqno, ring->get_gem_seqno(dev, ring));
 
 	/* Directly dispatch request retiring.  While we have the work queue
 	 * to handle this, the waiter on a request often wants an associated
@@ -1918,7 +1832,7 @@
 	 * a separate wait queue to handle that.
 	 */
 	if (ret == 0)
-		i915_gem_retire_requests(dev);
+		i915_gem_retire_requests(dev, ring);
 
 	return ret;
 }
@@ -1928,9 +1842,10 @@
  * request and object lists appropriately for that event.
  */
 static int
-i915_wait_request(struct drm_device *dev, uint32_t seqno)
+i915_wait_request(struct drm_device *dev, uint32_t seqno,
+		struct intel_ring_buffer *ring)
 {
-	return i915_do_wait_request(dev, seqno, 1);
+	return i915_do_wait_request(dev, seqno, 1, ring);
 }
 
 static void
@@ -1939,71 +1854,29 @@
 	       uint32_t flush_domains)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	uint32_t cmd;
-	RING_LOCALS;
-
-#if WATCH_EXEC
-	DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
-		  invalidate_domains, flush_domains);
-#endif
-	trace_i915_gem_request_flush(dev, dev_priv->mm.next_gem_seqno,
-				     invalidate_domains, flush_domains);
-
 	if (flush_domains & I915_GEM_DOMAIN_CPU)
 		drm_agp_chipset_flush(dev);
+	dev_priv->render_ring.flush(dev, &dev_priv->render_ring,
+			invalidate_domains,
+			flush_domains);
 
-	if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
-		/*
-		 * read/write caches:
-		 *
-		 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
-		 * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
-		 * also flushed at 2d versus 3d pipeline switches.
-		 *
-		 * read-only caches:
-		 *
-		 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
-		 * MI_READ_FLUSH is set, and is always flushed on 965.
-		 *
-		 * I915_GEM_DOMAIN_COMMAND may not exist?
-		 *
-		 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
-		 * invalidated when MI_EXE_FLUSH is set.
-		 *
-		 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
-		 * invalidated with every MI_FLUSH.
-		 *
-		 * TLBs:
-		 *
-		 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
-		 * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and
-		 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
-		 * are flushed at any MI_FLUSH.
-		 */
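+	/* The BSD ring executes independently and must be flushed as well. */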
+	if (HAS_BSD(dev))
+		dev_priv->bsd_ring.flush(dev, &dev_priv->bsd_ring,
+				invalidate_domains,
+				flush_domains);
+}
 
-		cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
-		if ((invalidate_domains|flush_domains) &
-		    I915_GEM_DOMAIN_RENDER)
-			cmd &= ~MI_NO_WRITE_FLUSH;
-		if (!IS_I965G(dev)) {
-			/*
-			 * On the 965, the sampler cache always gets flushed
-			 * and this bit is reserved.
-			 */
-			if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
-				cmd |= MI_READ_FLUSH;
-		}
-		if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
-			cmd |= MI_EXE_FLUSH;
-
-#if WATCH_EXEC
-		DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
-#endif
-		BEGIN_LP_RING(2);
-		OUT_RING(cmd);
-		OUT_RING(MI_NOOP);
-		ADVANCE_LP_RING();
-	}
+static void
+i915_gem_flush_ring(struct drm_device *dev,
+	       uint32_t invalidate_domains,
+	       uint32_t flush_domains,
+	       struct intel_ring_buffer *ring)
+{
+	if (flush_domains & I915_GEM_DOMAIN_CPU)
+		drm_agp_chipset_flush(dev);
+	ring->flush(dev, ring,
+			invalidate_domains,
+			flush_domains);
 }
 
 /**
@@ -2030,7 +1903,8 @@
 		DRM_INFO("%s: object %p wait for seqno %08x\n",
 			  __func__, obj, obj_priv->last_rendering_seqno);
 #endif
-		ret = i915_wait_request(dev, obj_priv->last_rendering_seqno);
+		ret = i915_wait_request(dev,
+				obj_priv->last_rendering_seqno, obj_priv->ring);
 		if (ret != 0)
 			return ret;
 	}
@@ -2146,11 +2020,14 @@
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	bool lists_empty;
-	uint32_t seqno;
+	uint32_t seqno1, seqno2;
+	int ret;
 
 	spin_lock(&dev_priv->mm.active_list_lock);
-	lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
-		      list_empty(&dev_priv->mm.active_list);
+	lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
+		       list_empty(&dev_priv->render_ring.active_list) &&
+		       (!HAS_BSD(dev) ||
+			list_empty(&dev_priv->bsd_ring.active_list)));
 	spin_unlock(&dev_priv->mm.active_list_lock);
 
 	if (lists_empty)
@@ -2158,11 +2035,25 @@
 
 	/* Flush everything onto the inactive list. */
 	i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
-	seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
-	if (seqno == 0)
+	seqno1 = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS,
+			&dev_priv->render_ring);
+	if (seqno1 == 0)
 		return -ENOMEM;
+	ret = i915_wait_request(dev, seqno1, &dev_priv->render_ring);
+	if (ret)
+		return ret;
 
-	return i915_wait_request(dev, seqno);
+	if (HAS_BSD(dev)) {
+		seqno2 = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS,
+				&dev_priv->bsd_ring);
+		if (seqno2 == 0)
+			return -ENOMEM;
+
+		ret = i915_wait_request(dev, seqno2, &dev_priv->bsd_ring);
+		if (ret)
+			return ret;
+	}
+
+	return ret;
 }
 
 static int
@@ -2175,7 +2066,9 @@
 	spin_lock(&dev_priv->mm.active_list_lock);
 	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
 		       list_empty(&dev_priv->mm.flushing_list) &&
-		       list_empty(&dev_priv->mm.active_list));
+		       list_empty(&dev_priv->render_ring.active_list) &&
+		       (!HAS_BSD(dev)
+			|| list_empty(&dev_priv->bsd_ring.active_list)));
 	spin_unlock(&dev_priv->mm.active_list_lock);
 
 	if (lists_empty)
@@ -2195,7 +2088,9 @@
 	spin_lock(&dev_priv->mm.active_list_lock);
 	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
 		       list_empty(&dev_priv->mm.flushing_list) &&
-		       list_empty(&dev_priv->mm.active_list));
+		       list_empty(&dev_priv->render_ring.active_list) &&
+		       (!HAS_BSD(dev)
+			|| list_empty(&dev_priv->bsd_ring.active_list)));
 	spin_unlock(&dev_priv->mm.active_list_lock);
 	BUG_ON(!lists_empty);
 
@@ -2209,8 +2104,13 @@
 	struct drm_gem_object *obj;
 	int ret;
 
+	struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
+	struct intel_ring_buffer *bsd_ring = &dev_priv->bsd_ring;
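+	/* A request completing on either ring may release the space we need. */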
 	for (;;) {
-		i915_gem_retire_requests(dev);
+		i915_gem_retire_requests(dev, render_ring);
+
+		if (HAS_BSD(dev))
+			i915_gem_retire_requests(dev, bsd_ring);
 
 		/* If there's an inactive buffer available now, grab it
 		 * and be done.
@@ -2234,14 +2134,30 @@
 		 * things, wait for the next to finish and hopefully leave us
 		 * a buffer to evict.
 		 */
-		if (!list_empty(&dev_priv->mm.request_list)) {
+		if (!list_empty(&render_ring->request_list)) {
 			struct drm_i915_gem_request *request;
 
-			request = list_first_entry(&dev_priv->mm.request_list,
+			request = list_first_entry(&render_ring->request_list,
 						   struct drm_i915_gem_request,
 						   list);
 
-			ret = i915_wait_request(dev, request->seqno);
+			ret = i915_wait_request(dev,
+					request->seqno, request->ring);
+			if (ret)
+				return ret;
+
+			continue;
+		}
+
+		if (HAS_BSD(dev) && !list_empty(&bsd_ring->request_list)) {
+			struct drm_i915_gem_request *request;
+
+			request = list_first_entry(&bsd_ring->request_list,
+						   struct drm_i915_gem_request,
+						   list);
+
+			ret = i915_wait_request(dev,
+					request->seqno, request->ring);
 			if (ret)
 				return ret;
 
@@ -2268,10 +2184,13 @@
 			if (obj != NULL) {
 				uint32_t seqno;
 
-				i915_gem_flush(dev,
+				i915_gem_flush_ring(dev,
 					       obj->write_domain,
-					       obj->write_domain);
-				seqno = i915_add_request(dev, NULL, obj->write_domain);
+					       obj->write_domain,
+					       obj_priv->ring);
+				seqno = i915_add_request(dev, NULL,
+						obj->write_domain,
+						obj_priv->ring);
 				if (seqno == 0)
 					return -ENOMEM;
 				continue;
@@ -2299,6 +2218,9 @@
 	struct inode *inode;
 	struct page *page;
 
+	BUG_ON(obj_priv->pages_refcount
+			== DRM_I915_GEM_OBJECT_MAX_PAGES_REFCOUNT);
+
 	if (obj_priv->pages_refcount++ != 0)
 		return 0;
 
@@ -2697,6 +2619,14 @@
 		return -EINVAL;
 	}
 
+	/* If the object is bigger than the entire aperture, reject it early
+	 * before evicting everything in a vain attempt to find space.
+	 */
+	if (obj->size > dev->gtt_total) {
+		DRM_ERROR("Attempting to bind an object larger than the aperture\n");
+		return -E2BIG;
+	}
+
  search_free:
 	free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
 					obj->size, alignment, 0);
@@ -2807,6 +2737,7 @@
 {
 	struct drm_device *dev = obj->dev;
 	uint32_t old_write_domain;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 
 	if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
 		return;
@@ -2814,7 +2745,7 @@
 	/* Queue the GPU write cache flushing we need. */
 	old_write_domain = obj->write_domain;
 	i915_gem_flush(dev, 0, obj->write_domain);
-	(void) i915_add_request(dev, NULL, obj->write_domain);
+	(void) i915_add_request(dev, NULL, obj->write_domain, obj_priv->ring);
 	BUG_ON(obj->write_domain);
 
 	trace_i915_gem_object_change_domain(obj,
@@ -2954,23 +2885,24 @@
 		DRM_INFO("%s: object %p wait for seqno %08x\n",
 			  __func__, obj, obj_priv->last_rendering_seqno);
 #endif
-		ret = i915_do_wait_request(dev, obj_priv->last_rendering_seqno, 0);
+		ret = i915_do_wait_request(dev,
+				obj_priv->last_rendering_seqno,
+				0,
+				obj_priv->ring);
 		if (ret != 0)
 			return ret;
 	}
 
+	i915_gem_object_flush_cpu_write_domain(obj);
+
 	old_write_domain = obj->write_domain;
 	old_read_domains = obj->read_domains;
 
-	obj->read_domains &= I915_GEM_DOMAIN_GTT;
-
-	i915_gem_object_flush_cpu_write_domain(obj);
-
 	/* It should now be out of any other write domains, and we can update
 	 * the domain values for our changes.
 	 */
 	BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
-	obj->read_domains |= I915_GEM_DOMAIN_GTT;
+	obj->read_domains = I915_GEM_DOMAIN_GTT;
 	obj->write_domain = I915_GEM_DOMAIN_GTT;
 	obj_priv->dirty = 1;
 
@@ -3354,9 +3286,13 @@
 	             obj_priv->tiling_mode != I915_TILING_NONE;
 
 	/* Check fence reg constraints and rebind if necessary */
-	if (need_fence && !i915_gem_object_fence_offset_ok(obj,
-	    obj_priv->tiling_mode))
-		i915_gem_object_unbind(obj);
+	if (need_fence &&
+	    !i915_gem_object_fence_offset_ok(obj,
+					     obj_priv->tiling_mode)) {
+		ret = i915_gem_object_unbind(obj);
+		if (ret)
+			return ret;
+	}
 
 	/* Choose the GTT offset for our buffer and put it there. */
 	ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
@@ -3370,9 +3306,6 @@
 	if (need_fence) {
 		ret = i915_gem_object_get_fence_reg(obj);
 		if (ret != 0) {
-			if (ret != -EBUSY && ret != -ERESTARTSYS)
-				DRM_ERROR("Failure to install fence: %d\n",
-					  ret);
 			i915_gem_object_unpin(obj);
 			return ret;
 		}
@@ -3545,62 +3478,6 @@
 	return 0;
 }
 
-/** Dispatch a batchbuffer to the ring
- */
-static int
-i915_dispatch_gem_execbuffer(struct drm_device *dev,
-			      struct drm_i915_gem_execbuffer2 *exec,
-			      struct drm_clip_rect *cliprects,
-			      uint64_t exec_offset)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	int nbox = exec->num_cliprects;
-	int i = 0, count;
-	uint32_t exec_start, exec_len;
-	RING_LOCALS;
-
-	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
-	exec_len = (uint32_t) exec->batch_len;
-
-	trace_i915_gem_request_submit(dev, dev_priv->mm.next_gem_seqno + 1);
-
-	count = nbox ? nbox : 1;
-
-	for (i = 0; i < count; i++) {
-		if (i < nbox) {
-			int ret = i915_emit_box(dev, cliprects, i,
-						exec->DR1, exec->DR4);
-			if (ret)
-				return ret;
-		}
-
-		if (IS_I830(dev) || IS_845G(dev)) {
-			BEGIN_LP_RING(4);
-			OUT_RING(MI_BATCH_BUFFER);
-			OUT_RING(exec_start | MI_BATCH_NON_SECURE);
-			OUT_RING(exec_start + exec_len - 4);
-			OUT_RING(0);
-			ADVANCE_LP_RING();
-		} else {
-			BEGIN_LP_RING(2);
-			if (IS_I965G(dev)) {
-				OUT_RING(MI_BATCH_BUFFER_START |
-					 (2 << 6) |
-					 MI_BATCH_NON_SECURE_I965);
-				OUT_RING(exec_start);
-			} else {
-				OUT_RING(MI_BATCH_BUFFER_START |
-					 (2 << 6));
-				OUT_RING(exec_start | MI_BATCH_NON_SECURE);
-			}
-			ADVANCE_LP_RING();
-		}
-	}
-
-	/* XXX breadcrumb */
-	return 0;
-}
-
 /* Throttle our rendering by waiting until the ring has completed our requests
  * emitted over 20 msec ago.
  *
@@ -3629,7 +3506,7 @@
 		if (time_after_eq(request->emitted_jiffies, recent_enough))
 			break;
 
-		ret = i915_wait_request(dev, request->seqno);
+		ret = i915_wait_request(dev, request->seqno, request->ring);
 		if (ret != 0)
 			break;
 	}
@@ -3786,10 +3663,22 @@
 	uint32_t seqno, flush_domains, reloc_index;
 	int pin_tries, flips;
 
+	struct intel_ring_buffer *ring = NULL;
+
 #if WATCH_EXEC
 	DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
 		  (int) args->buffers_ptr, args->buffer_count, args->batch_len);
 #endif
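+	/* Select the target ring; userspace requests the BSD ring
+	 * through the execbuffer2 flags.
+	 */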
+	if (args->flags & I915_EXEC_BSD) {
+		if (!HAS_BSD(dev)) {
+			DRM_ERROR("execbuf with wrong flag\n");
+			return -EINVAL;
+		}
+		ring = &dev_priv->bsd_ring;
+	} else {
+		ring = &dev_priv->render_ring;
+	}
 
 	if (args->buffer_count < 1) {
 		DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
@@ -3902,11 +3791,19 @@
 		if (ret != -ENOSPC || pin_tries >= 1) {
 			if (ret != -ERESTARTSYS) {
 				unsigned long long total_size = 0;
-				for (i = 0; i < args->buffer_count; i++)
+				int num_fences = 0;
+				for (i = 0; i < args->buffer_count; i++) {
+					obj_priv = to_intel_bo(object_list[i]);
+
 					total_size += object_list[i]->size;
-				DRM_ERROR("Failed to pin buffer %d of %d, total %llu bytes: %d\n",
+					num_fences +=
+						exec_list[i].flags & EXEC_OBJECT_NEEDS_FENCE &&
+						obj_priv->tiling_mode != I915_TILING_NONE;
+				}
+				DRM_ERROR("Failed to pin buffer %d of %d, total %llu bytes, %d fences: %d\n",
 					  pinned+1, args->buffer_count,
-					  total_size, ret);
+					  total_size, num_fences,
+					  ret);
 				DRM_ERROR("%d objects [%d pinned], "
 					  "%d object bytes [%d pinned], "
 					  "%d/%d gtt bytes\n",
@@ -3976,9 +3873,16 @@
 		i915_gem_flush(dev,
 			       dev->invalidate_domains,
 			       dev->flush_domains);
-		if (dev->flush_domains & I915_GEM_GPU_DOMAINS)
+		if (dev->flush_domains & I915_GEM_GPU_DOMAINS) {
 			(void)i915_add_request(dev, file_priv,
-					       dev->flush_domains);
+					dev->flush_domains,
+					&dev_priv->render_ring);
+
+			if (HAS_BSD(dev))
+				(void)i915_add_request(dev, file_priv,
+						dev->flush_domains,
+						&dev_priv->bsd_ring);
+		}
 	}
 
 	for (i = 0; i < args->buffer_count; i++) {
@@ -4015,7 +3919,8 @@
 #endif
 
 	/* Exec the batchbuffer */
-	ret = i915_dispatch_gem_execbuffer(dev, args, cliprects, exec_offset);
+	ret = ring->dispatch_gem_execbuffer(dev, ring, args,
+			cliprects, exec_offset);
 	if (ret) {
 		DRM_ERROR("dispatch failed %d\n", ret);
 		goto err;
@@ -4025,7 +3930,7 @@
 	 * Ensure that the commands in the batch buffer are
 	 * finished before the interrupt fires
 	 */
-	flush_domains = i915_retire_commands(dev);
+	flush_domains = i915_retire_commands(dev, ring);
 
 	i915_verify_inactive(dev, __FILE__, __LINE__);
 
@@ -4036,12 +3941,13 @@
 	 * *some* interrupts representing completion of buffers that we can
 	 * wait on when trying to clear up gtt space).
 	 */
-	seqno = i915_add_request(dev, file_priv, flush_domains);
+	seqno = i915_add_request(dev, file_priv, flush_domains, ring);
 	BUG_ON(seqno == 0);
 	for (i = 0; i < args->buffer_count; i++) {
 		struct drm_gem_object *obj = object_list[i];
+		obj_priv = to_intel_bo(obj);
 
-		i915_gem_object_move_to_active(obj, seqno);
+		i915_gem_object_move_to_active(obj, seqno, ring);
 #if WATCH_LRU
 		DRM_INFO("%s: move to exec list %p\n", __func__, obj);
 #endif
@@ -4153,7 +4059,7 @@
 	exec2.DR4 = args->DR4;
 	exec2.num_cliprects = args->num_cliprects;
 	exec2.cliprects_ptr = args->cliprects_ptr;
-	exec2.flags = 0;
+	exec2.flags = I915_EXEC_RENDER;
 
 	ret = i915_gem_do_execbuffer(dev, data, file_priv, &exec2, exec2_list);
 	if (!ret) {
@@ -4239,7 +4145,20 @@
 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	int ret;
 
+	BUG_ON(obj_priv->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
+
 	i915_verify_inactive(dev, __FILE__, __LINE__);
+
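+	/* If the object is already bound at an unsuitable alignment,
+	 * unbind it so the bind below can place it correctly.
+	 */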
+	if (obj_priv->gtt_space != NULL) {
+		if (alignment == 0)
+			alignment = i915_gem_get_gtt_alignment(obj);
+		if (obj_priv->gtt_offset & (alignment - 1)) {
+			ret = i915_gem_object_unbind(obj);
+			if (ret)
+				return ret;
+		}
+	}
+
 	if (obj_priv->gtt_space == NULL) {
 		ret = i915_gem_object_bind_to_gtt(obj, alignment);
 		if (ret)
@@ -4392,6 +4311,7 @@
 	struct drm_i915_gem_busy *args = data;
 	struct drm_gem_object *obj;
 	struct drm_i915_gem_object *obj_priv;
+	drm_i915_private_t *dev_priv = dev->dev_private;
 
 	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
 	if (obj == NULL) {
@@ -4406,7 +4326,10 @@
 	 * actually unmasked, and our working set ends up being larger than
 	 * required.
 	 */
-	i915_gem_retire_requests(dev);
+	i915_gem_retire_requests(dev, &dev_priv->render_ring);
+
+	if (HAS_BSD(dev))
+		i915_gem_retire_requests(dev, &dev_priv->bsd_ring);
 
 	obj_priv = to_intel_bo(obj);
 	/* Don't count being on the flushing list against the object being
@@ -4573,7 +4496,10 @@
 
 	mutex_lock(&dev->struct_mutex);
 
-	if (dev_priv->mm.suspended || dev_priv->ring.ring_obj == NULL) {
+	if (dev_priv->mm.suspended ||
+			(dev_priv->render_ring.gem_object == NULL) ||
+			(HAS_BSD(dev) &&
+			 dev_priv->bsd_ring.gem_object == NULL)) {
 		mutex_unlock(&dev->struct_mutex);
 		return 0;
 	}
@@ -4654,71 +4580,6 @@
 	return ret;
 }
 
-static int
-i915_gem_init_hws(struct drm_device *dev)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_gem_object *obj;
-	struct drm_i915_gem_object *obj_priv;
-	int ret;
-
-	/* If we need a physical address for the status page, it's already
-	 * initialized at driver load time.
-	 */
-	if (!I915_NEED_GFX_HWS(dev))
-		return 0;
-
-	obj = i915_gem_alloc_object(dev, 4096);
-	if (obj == NULL) {
-		DRM_ERROR("Failed to allocate status page\n");
-		ret = -ENOMEM;
-		goto err;
-	}
-	obj_priv = to_intel_bo(obj);
-	obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
-
-	ret = i915_gem_object_pin(obj, 4096);
-	if (ret != 0) {
-		drm_gem_object_unreference(obj);
-		goto err_unref;
-	}
-
-	dev_priv->status_gfx_addr = obj_priv->gtt_offset;
-
-	dev_priv->hw_status_page = kmap(obj_priv->pages[0]);
-	if (dev_priv->hw_status_page == NULL) {
-		DRM_ERROR("Failed to map status page.\n");
-		memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
-		ret = -EINVAL;
-		goto err_unpin;
-	}
-
-	if (HAS_PIPE_CONTROL(dev)) {
-		ret = i915_gem_init_pipe_control(dev);
-		if (ret)
-			goto err_unpin;
-	}
-
-	dev_priv->hws_obj = obj;
-	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
-	if (IS_GEN6(dev)) {
-		I915_WRITE(HWS_PGA_GEN6, dev_priv->status_gfx_addr);
-		I915_READ(HWS_PGA_GEN6); /* posting read */
-	} else {
-		I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
-		I915_READ(HWS_PGA); /* posting read */
-	}
-	DRM_DEBUG_DRIVER("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
-
-	return 0;
-
-err_unpin:
-	i915_gem_object_unpin(obj);
-err_unref:
-	drm_gem_object_unreference(obj);
-err:
-	return 0;
-}
 
 static void
 i915_gem_cleanup_pipe_control(struct drm_device *dev)
@@ -4737,146 +4598,46 @@
 	dev_priv->seqno_page = NULL;
 }
 
-static void
-i915_gem_cleanup_hws(struct drm_device *dev)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_gem_object *obj;
-	struct drm_i915_gem_object *obj_priv;
-
-	if (dev_priv->hws_obj == NULL)
-		return;
-
-	obj = dev_priv->hws_obj;
-	obj_priv = to_intel_bo(obj);
-
-	kunmap(obj_priv->pages[0]);
-	i915_gem_object_unpin(obj);
-	drm_gem_object_unreference(obj);
-	dev_priv->hws_obj = NULL;
-
-	memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
-	dev_priv->hw_status_page = NULL;
-
-	if (HAS_PIPE_CONTROL(dev))
-		i915_gem_cleanup_pipe_control(dev);
-
-	/* Write high address into HWS_PGA when disabling. */
-	I915_WRITE(HWS_PGA, 0x1ffff000);
-}
-
 int
 i915_gem_init_ringbuffer(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_gem_object *obj;
-	struct drm_i915_gem_object *obj_priv;
-	drm_i915_ring_buffer_t *ring = &dev_priv->ring;
 	int ret;
-	u32 head;
 
-	ret = i915_gem_init_hws(dev);
-	if (ret != 0)
-		return ret;
+	dev_priv->render_ring = render_ring;
 
-	obj = i915_gem_alloc_object(dev, 128 * 1024);
-	if (obj == NULL) {
-		DRM_ERROR("Failed to allocate ringbuffer\n");
-		i915_gem_cleanup_hws(dev);
-		return -ENOMEM;
-	}
-	obj_priv = to_intel_bo(obj);
-
-	ret = i915_gem_object_pin(obj, 4096);
-	if (ret != 0) {
-		drm_gem_object_unreference(obj);
-		i915_gem_cleanup_hws(dev);
-		return ret;
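+	/* Chipsets that need a physical status page already have one
+	 * allocated at driver load; point the render ring at it.
+	 */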
+	if (!I915_NEED_GFX_HWS(dev)) {
+		dev_priv->render_ring.status_page.page_addr
+			= dev_priv->status_page_dmah->vaddr;
+		memset(dev_priv->render_ring.status_page.page_addr,
+				0, PAGE_SIZE);
 	}
 
-	/* Set up the kernel mapping for the ring. */
-	ring->Size = obj->size;
-
-	ring->map.offset = dev->agp->base + obj_priv->gtt_offset;
-	ring->map.size = obj->size;
-	ring->map.type = 0;
-	ring->map.flags = 0;
-	ring->map.mtrr = 0;
-
-	drm_core_ioremap_wc(&ring->map, dev);
-	if (ring->map.handle == NULL) {
-		DRM_ERROR("Failed to map ringbuffer.\n");
-		memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
-		i915_gem_object_unpin(obj);
-		drm_gem_object_unreference(obj);
-		i915_gem_cleanup_hws(dev);
-		return -EINVAL;
-	}
-	ring->ring_obj = obj;
-	ring->virtual_start = ring->map.handle;
-
-	/* Stop the ring if it's running. */
-	I915_WRITE(PRB0_CTL, 0);
-	I915_WRITE(PRB0_TAIL, 0);
-	I915_WRITE(PRB0_HEAD, 0);
-
-	/* Initialize the ring. */
-	I915_WRITE(PRB0_START, obj_priv->gtt_offset);
-	head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
-
-	/* G45 ring initialization fails to reset head to zero */
-	if (head != 0) {
-		DRM_ERROR("Ring head not reset to zero "
-			  "ctl %08x head %08x tail %08x start %08x\n",
-			  I915_READ(PRB0_CTL),
-			  I915_READ(PRB0_HEAD),
-			  I915_READ(PRB0_TAIL),
-			  I915_READ(PRB0_START));
-		I915_WRITE(PRB0_HEAD, 0);
-
-		DRM_ERROR("Ring head forced to zero "
-			  "ctl %08x head %08x tail %08x start %08x\n",
-			  I915_READ(PRB0_CTL),
-			  I915_READ(PRB0_HEAD),
-			  I915_READ(PRB0_TAIL),
-			  I915_READ(PRB0_START));
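+	/* On chips with PIPE_CONTROL, seqnos are written to a separate
+	 * scratch page; set that up before enabling the ring.
+	 */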
+	if (HAS_PIPE_CONTROL(dev)) {
+		ret = i915_gem_init_pipe_control(dev);
+		if (ret)
+			return ret;
 	}
 
-	I915_WRITE(PRB0_CTL,
-		   ((obj->size - 4096) & RING_NR_PAGES) |
-		   RING_NO_REPORT |
-		   RING_VALID);
+	ret = intel_init_ring_buffer(dev, &dev_priv->render_ring);
+	if (ret)
+		goto cleanup_pipe_control;
 
-	head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
-
-	/* If the head is still not zero, the ring is dead */
-	if (head != 0) {
-		DRM_ERROR("Ring initialization failed "
-			  "ctl %08x head %08x tail %08x start %08x\n",
-			  I915_READ(PRB0_CTL),
-			  I915_READ(PRB0_HEAD),
-			  I915_READ(PRB0_TAIL),
-			  I915_READ(PRB0_START));
-		return -EIO;
-	}
-
-	/* Update our cache of the ring state */
-	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		i915_kernel_lost_context(dev);
-	else {
-		ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
-		ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
-		ring->space = ring->head - (ring->tail + 8);
-		if (ring->space < 0)
-			ring->space += ring->Size;
-	}
-
-	if (IS_I9XX(dev) && !IS_GEN3(dev)) {
-		I915_WRITE(MI_MODE,
-			   (VS_TIMER_DISPATCH) << 16 | VS_TIMER_DISPATCH);
+	if (HAS_BSD(dev)) {
+		dev_priv->bsd_ring = bsd_ring;
+		ret = intel_init_ring_buffer(dev, &dev_priv->bsd_ring);
+		if (ret)
+			goto cleanup_render_ring;
 	}
 
 	return 0;
+
+cleanup_render_ring:
+	intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
+cleanup_pipe_control:
+	if (HAS_PIPE_CONTROL(dev))
+		i915_gem_cleanup_pipe_control(dev);
+	return ret;
 }
 
 void
@@ -4884,17 +4645,11 @@
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 
-	if (dev_priv->ring.ring_obj == NULL)
-		return;
-
-	drm_core_ioremapfree(&dev_priv->ring.map, dev);
-
-	i915_gem_object_unpin(dev_priv->ring.ring_obj);
-	drm_gem_object_unreference(dev_priv->ring.ring_obj);
-	dev_priv->ring.ring_obj = NULL;
-	memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
-
-	i915_gem_cleanup_hws(dev);
+	intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
+	if (HAS_BSD(dev))
+		intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
+	if (HAS_PIPE_CONTROL(dev))
+		i915_gem_cleanup_pipe_control(dev);
 }
 
 int
@@ -4922,12 +4677,14 @@
 	}
 
 	spin_lock(&dev_priv->mm.active_list_lock);
-	BUG_ON(!list_empty(&dev_priv->mm.active_list));
+	BUG_ON(!list_empty(&dev_priv->render_ring.active_list));
+	BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.active_list));
 	spin_unlock(&dev_priv->mm.active_list_lock);
 
 	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
 	BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
-	BUG_ON(!list_empty(&dev_priv->mm.request_list));
+	BUG_ON(!list_empty(&dev_priv->render_ring.request_list));
+	BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.request_list));
 	mutex_unlock(&dev->struct_mutex);
 
 	drm_irq_install(dev);
@@ -4966,18 +4723,20 @@
 	drm_i915_private_t *dev_priv = dev->dev_private;
 
 	spin_lock_init(&dev_priv->mm.active_list_lock);
-	INIT_LIST_HEAD(&dev_priv->mm.active_list);
 	INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
 	INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
 	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
-	INIT_LIST_HEAD(&dev_priv->mm.request_list);
 	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
+	INIT_LIST_HEAD(&dev_priv->render_ring.active_list);
+	INIT_LIST_HEAD(&dev_priv->render_ring.request_list);
+	if (HAS_BSD(dev)) {
+		INIT_LIST_HEAD(&dev_priv->bsd_ring.active_list);
+		INIT_LIST_HEAD(&dev_priv->bsd_ring.request_list);
+	}
 	for (i = 0; i < 16; i++)
 		INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
 	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
 			  i915_gem_retire_work_handler);
-	dev_priv->mm.next_gem_seqno = 1;
-
 	spin_lock(&shrink_list_lock);
 	list_add(&dev_priv->mm.shrink_list, &shrink_list);
 	spin_unlock(&shrink_list_lock);
@@ -5209,7 +4968,9 @@
 
 	spin_lock(&dev_priv->mm.active_list_lock);
 	lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
-		      list_empty(&dev_priv->mm.active_list);
+		      list_empty(&dev_priv->render_ring.active_list);
+	if (HAS_BSD(dev))
+		lists_empty &= list_empty(&dev_priv->bsd_ring.active_list);
 	spin_unlock(&dev_priv->mm.active_list_lock);
 
 	return !lists_empty;
@@ -5254,8 +5015,10 @@
 			continue;
 
 		spin_unlock(&shrink_list_lock);
+		i915_gem_retire_requests(dev, &dev_priv->render_ring);
 
-		i915_gem_retire_requests(dev);
+			i915_gem_retire_requests(dev, &dev_priv->bsd_ring);
 
 		list_for_each_entry_safe(obj_priv, next_obj,
 					 &dev_priv->mm.inactive_list,
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 8c3f080..2479be0 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -53,7 +53,7 @@
 	 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
 
 /** Interrupts that we mask and unmask at runtime. */
-#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT)
+#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT | I915_BSD_USER_INTERRUPT)
 
 #define I915_PIPE_VBLANK_STATUS	(PIPE_START_VBLANK_INTERRUPT_STATUS |\
 				 PIPE_VBLANK_INTERRUPT_STATUS)
@@ -74,7 +74,7 @@
 	}
 }
 
-static inline void
+void
 ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
 {
 	if ((dev_priv->gt_irq_mask_reg & mask) != mask) {
@@ -115,7 +115,7 @@
 	}
 }
 
-static inline void
+void
 i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
 {
 	if ((dev_priv->irq_mask_reg & mask) != mask) {
@@ -278,10 +278,9 @@
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	u32 busy_up, busy_down, max_avg, min_avg;
-	u16 rgvswctl;
 	u8 new_delay = dev_priv->cur_delay;
 
-	I915_WRITE(MEMINTRSTS, I915_READ(MEMINTRSTS) & ~MEMINT_EVAL_CHG);
+	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
 	busy_up = I915_READ(RCPREVBSYTUPAVG);
 	busy_down = I915_READ(RCPREVBSYTDNAVG);
 	max_avg = I915_READ(RCBMAXAVG);
@@ -300,27 +299,8 @@
 			new_delay = dev_priv->min_delay;
 	}
 
-	DRM_DEBUG("rps change requested: %d -> %d\n",
-		  dev_priv->cur_delay, new_delay);
-
-	rgvswctl = I915_READ(MEMSWCTL);
-	if (rgvswctl & MEMCTL_CMD_STS) {
-		DRM_ERROR("gpu busy, RCS change rejected\n");
-		return; /* still busy with another command */
-	}
-
-	/* Program the new state */
-	rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
-		(new_delay << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
-	I915_WRITE(MEMSWCTL, rgvswctl);
-	POSTING_READ(MEMSWCTL);
-
-	rgvswctl |= MEMCTL_CMD_STS;
-	I915_WRITE(MEMSWCTL, rgvswctl);
-
-	dev_priv->cur_delay = new_delay;
-
-	DRM_DEBUG("rps changed\n");
+	if (ironlake_set_drps(dev, new_delay))
+		dev_priv->cur_delay = new_delay;
 
 	return;
 }
@@ -331,6 +311,7 @@
 	int ret = IRQ_NONE;
 	u32 de_iir, gt_iir, de_ier, pch_iir;
 	struct drm_i915_master_private *master_priv;
+	struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
 
 	/* disable master interrupt before clearing iir  */
 	de_ier = I915_READ(DEIER);
@@ -354,13 +335,16 @@
 	}
 
 	if (gt_iir & GT_PIPE_NOTIFY) {
-		u32 seqno = i915_get_gem_seqno(dev);
-		dev_priv->mm.irq_gem_seqno = seqno;
+		u32 seqno = render_ring->get_gem_seqno(dev, render_ring);
+		render_ring->irq_gem_seqno = seqno;
 		trace_i915_gem_request_complete(dev, seqno);
-		DRM_WAKEUP(&dev_priv->irq_queue);
+		DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
 		dev_priv->hangcheck_count = 0;
 		mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
 	}
+	if (gt_iir & GT_BSD_USER_INTERRUPT)
+		DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue);
 
 	if (de_iir & DE_GSE)
 		ironlake_opregion_gse_intr(dev);
@@ -388,7 +372,7 @@
 	}
 
 	if (de_iir & DE_PCU_EVENT) {
-		I915_WRITE(MEMINTRSTS, I915_READ(MEMINTRSTS));
+		I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
 		i915_handle_rps_change(dev);
 	}
 
@@ -536,17 +520,18 @@
 	 */
 	bbaddr = 0;
 	head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
-	ring = (u32 *)(dev_priv->ring.virtual_start + head);
+	ring = (u32 *)(dev_priv->render_ring.virtual_start + head);
 
-	while (--ring >= (u32 *)dev_priv->ring.virtual_start) {
+	while (--ring >= (u32 *)dev_priv->render_ring.virtual_start) {
 		bbaddr = i915_get_bbaddr(dev, ring);
 		if (bbaddr)
 			break;
 	}
 
 	if (bbaddr == 0) {
-		ring = (u32 *)(dev_priv->ring.virtual_start + dev_priv->ring.Size);
-		while (--ring >= (u32 *)dev_priv->ring.virtual_start) {
+		ring = (u32 *)(dev_priv->render_ring.virtual_start
+				+ dev_priv->render_ring.size);
+		while (--ring >= (u32 *)dev_priv->render_ring.virtual_start) {
 			bbaddr = i915_get_bbaddr(dev, ring);
 			if (bbaddr)
 				break;
@@ -587,7 +572,7 @@
 		return;
 	}
 
-	error->seqno = i915_get_gem_seqno(dev);
+	error->seqno = i915_get_gem_seqno(dev, &dev_priv->render_ring);
 	error->eir = I915_READ(EIR);
 	error->pgtbl_er = I915_READ(PGTBL_ER);
 	error->pipeastat = I915_READ(PIPEASTAT);
@@ -615,7 +600,9 @@
 	batchbuffer[0] = NULL;
 	batchbuffer[1] = NULL;
 	count = 0;
-	list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
+	list_for_each_entry(obj_priv,
+			&dev_priv->render_ring.active_list, list) {
 		struct drm_gem_object *obj = &obj_priv->base;
 
 		if (batchbuffer[0] == NULL &&
@@ -639,7 +626,8 @@
 	error->batchbuffer[1] = i915_error_object_create(dev, batchbuffer[1]);
 
 	/* Record the ringbuffer */
-	error->ringbuffer = i915_error_object_create(dev, dev_priv->ring.ring_obj);
+	error->ringbuffer = i915_error_object_create(dev,
+			dev_priv->render_ring.gem_object);
 
 	/* Record buffers on the active list. */
 	error->active_bo = NULL;
@@ -651,7 +639,8 @@
 
 	if (error->active_bo) {
 		int i = 0;
-		list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
+		list_for_each_entry(obj_priv,
+				&dev_priv->render_ring.active_list, list) {
 			struct drm_gem_object *obj = &obj_priv->base;
 
 			error->active_bo[i].size = obj->size;
@@ -703,24 +692,13 @@
 		i915_error_state_free(dev, error);
 }
 
-/**
- * i915_handle_error - handle an error interrupt
- * @dev: drm device
- *
- * Do some basic checking of regsiter state at error interrupt time and
- * dump it to the syslog.  Also call i915_capture_error_state() to make
- * sure we get a record and make it available in debugfs.  Fire a uevent
- * so userspace knows something bad happened (should trigger collection
- * of a ring dump etc.).
- */
-static void i915_handle_error(struct drm_device *dev, bool wedged)
+static void i915_report_and_clear_eir(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 eir = I915_READ(EIR);
-	u32 pipea_stats = I915_READ(PIPEASTAT);
-	u32 pipeb_stats = I915_READ(PIPEBSTAT);
 
-	i915_capture_error_state(dev);
+	if (!eir)
+		return;
 
 	printk(KERN_ERR "render error detected, EIR: 0x%08x\n",
 	       eir);
@@ -766,6 +744,9 @@
 	}
 
 	if (eir & I915_ERROR_MEMORY_REFRESH) {
+		u32 pipea_stats = I915_READ(PIPEASTAT);
+		u32 pipeb_stats = I915_READ(PIPEBSTAT);
+
 		printk(KERN_ERR "memory refresh error\n");
 		printk(KERN_ERR "PIPEASTAT: 0x%08x\n",
 		       pipea_stats);
@@ -822,6 +803,24 @@
 		I915_WRITE(EMR, I915_READ(EMR) | eir);
 		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
 	}
+}
+
+/**
+ * i915_handle_error - handle an error interrupt
+ * @dev: drm device
+ *
+ * Do some basic checking of register state at error interrupt time and
+ * dump it to the syslog.  Also call i915_capture_error_state() to make
+ * sure we get a record and make it available in debugfs.  Fire a uevent
+ * so userspace knows something bad happened (should trigger collection
+ * of a ring dump etc.).
+ */
+static void i915_handle_error(struct drm_device *dev, bool wedged)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	i915_capture_error_state(dev);
+	i915_report_and_clear_eir(dev);
 
 	if (wedged) {
 		atomic_set(&dev_priv->mm.wedged, 1);
@@ -829,7 +828,7 @@
 		/*
 		 * Wakeup waiting processes so they don't hang
 		 */
-		DRM_WAKEUP(&dev_priv->irq_queue);
+		DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
 	}
 
 	queue_work(dev_priv->wq, &dev_priv->error_work);
@@ -848,6 +847,7 @@
 	unsigned long irqflags;
 	int irq_received;
 	int ret = IRQ_NONE;
+	struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
 
 	atomic_inc(&dev_priv->irq_received);
 
@@ -928,14 +928,18 @@
 		}
 
 		if (iir & I915_USER_INTERRUPT) {
-			u32 seqno = i915_get_gem_seqno(dev);
-			dev_priv->mm.irq_gem_seqno = seqno;
+			u32 seqno =
+				render_ring->get_gem_seqno(dev, render_ring);
+			render_ring->irq_gem_seqno = seqno;
 			trace_i915_gem_request_complete(dev, seqno);
-			DRM_WAKEUP(&dev_priv->irq_queue);
+			DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
 			dev_priv->hangcheck_count = 0;
 			mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
 		}
 
+		if (HAS_BSD(dev) && (iir & I915_BSD_USER_INTERRUPT))
+			DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue);
+
 		if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT)
 			intel_prepare_page_flip(dev, 0);
 
@@ -984,7 +988,6 @@
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
-	RING_LOCALS;
 
 	i915_kernel_lost_context(dev);
 
@@ -1006,43 +1009,13 @@
 	return dev_priv->counter;
 }
 
-void i915_user_irq_get(struct drm_device *dev)
-{
-	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-	unsigned long irqflags;
-
-	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
-	if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) {
-		if (HAS_PCH_SPLIT(dev))
-			ironlake_enable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
-		else
-			i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
-	}
-	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
-}
-
-void i915_user_irq_put(struct drm_device *dev)
-{
-	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-	unsigned long irqflags;
-
-	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
-	BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0);
-	if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) {
-		if (HAS_PCH_SPLIT(dev))
-			ironlake_disable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
-		else
-			i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
-	}
-	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
-}
-
 void i915_trace_irq_get(struct drm_device *dev, u32 seqno)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
 
 	if (dev_priv->trace_irq_seqno == 0)
-		i915_user_irq_get(dev);
+		render_ring->user_irq_get(dev, render_ring);
 
 	dev_priv->trace_irq_seqno = seqno;
 }
@@ -1052,6 +1025,7 @@
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
 	int ret = 0;
+	struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
 
 	DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
 		  READ_BREADCRUMB(dev_priv));
@@ -1065,10 +1039,10 @@
 	if (master_priv->sarea_priv)
 		master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
 
-	i915_user_irq_get(dev);
-	DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
+	render_ring->user_irq_get(dev, render_ring);
+	DRM_WAIT_ON(ret, dev_priv->render_ring.irq_queue, 3 * DRM_HZ,
 		    READ_BREADCRUMB(dev_priv) >= irq_nr);
-	i915_user_irq_put(dev);
+	render_ring->user_irq_put(dev, render_ring);
 
 	if (ret == -EBUSY) {
 		DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
@@ -1087,7 +1061,7 @@
 	drm_i915_irq_emit_t *emit = data;
 	int result;
 
-	if (!dev_priv || !dev_priv->ring.virtual_start) {
+	if (!dev_priv || !dev_priv->render_ring.virtual_start) {
 		DRM_ERROR("called with no initialization\n");
 		return -EINVAL;
 	}
@@ -1233,9 +1207,12 @@
 	return -EINVAL;
 }
 
-struct drm_i915_gem_request *i915_get_tail_request(struct drm_device *dev) {
+struct drm_i915_gem_request *
+i915_get_tail_request(struct drm_device *dev)
+{
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	return list_entry(dev_priv->mm.request_list.prev, struct drm_i915_gem_request, list);
+	return list_entry(dev_priv->render_ring.request_list.prev,
+			struct drm_i915_gem_request, list);
 }
 
 /**
@@ -1260,8 +1237,10 @@
 		acthd = I915_READ(ACTHD_I965);
 
 	/* If all work is done then ACTHD clearly hasn't advanced. */
-	if (list_empty(&dev_priv->mm.request_list) ||
-		       i915_seqno_passed(i915_get_gem_seqno(dev), i915_get_tail_request(dev)->seqno)) {
+	if (list_empty(&dev_priv->render_ring.request_list) ||
+		i915_seqno_passed(i915_get_gem_seqno(dev,
+				&dev_priv->render_ring),
+			i915_get_tail_request(dev)->seqno)) {
 		dev_priv->hangcheck_count = 0;
 		return;
 	}
@@ -1314,7 +1293,7 @@
 	/* enable kind of interrupts always enabled */
 	u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
 			   DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
-	u32 render_mask = GT_PIPE_NOTIFY;
+	u32 render_mask = GT_PIPE_NOTIFY | GT_BSD_USER_INTERRUPT;
 	u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
 			   SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
 
@@ -1328,7 +1307,7 @@
 	(void) I915_READ(DEIER);
 
 	/* user interrupt should be enabled, but masked initial */
-	dev_priv->gt_irq_mask_reg = 0xffffffff;
+	dev_priv->gt_irq_mask_reg = ~render_mask;
 	dev_priv->gt_irq_enable_reg = render_mask;
 
 	I915_WRITE(GTIIR, I915_READ(GTIIR));
@@ -1391,7 +1370,10 @@
 	u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR;
 	u32 error_mask;
 
-	DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
+	DRM_INIT_WAITQUEUE(&dev_priv->render_ring.irq_queue);
+
+	if (HAS_BSD(dev))
+		DRM_INIT_WAITQUEUE(&dev_priv->bsd_ring.irq_queue);
 
 	dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
 
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index f3e39cc..64b0a3a 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -334,6 +334,7 @@
 #define   I915_DEBUG_INTERRUPT				(1<<2)
 #define   I915_USER_INTERRUPT				(1<<1)
 #define   I915_ASLE_INTERRUPT				(1<<0)
+#define   I915_BSD_USER_INTERRUPT                      (1<<25)
 #define EIR		0x020b0
 #define EMR		0x020b4
 #define ESR		0x020b8
@@ -368,6 +369,36 @@
 #define BB_ADDR		0x02140 /* 8 bytes */
 #define GFX_FLSH_CNTL	0x02170 /* 915+ only */
 
+/* GEN6 interrupt control */
+#define GEN6_RENDER_HWSTAM	0x2098
+#define GEN6_RENDER_IMR		0x20a8
+#define   GEN6_RENDER_CONTEXT_SWITCH_INTERRUPT		(1 << 8)
+#define   GEN6_RENDER_PPGTT_PAGE_FAULT			(1 << 7)
+#define   GEN6_RENDER_TIMEOUT_COUNTER_EXPIRED		(1 << 6)
+#define   GEN6_RENDER_L3_PARITY_ERROR			(1 << 5)
+#define   GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT	(1 << 4)
+#define   GEN6_RENDER_COMMAND_PARSER_MASTER_ERROR	(1 << 3)
+#define   GEN6_RENDER_SYNC_STATUS			(1 << 2)
+#define   GEN6_RENDER_DEBUG_INTERRUPT			(1 << 1)
+#define   GEN6_RENDER_USER_INTERRUPT			(1 << 0)
+
+#define GEN6_BLITTER_HWSTAM	0x22098
+#define GEN6_BLITTER_IMR	0x220a8
+#define   GEN6_BLITTER_MI_FLUSH_DW_NOTIFY_INTERRUPT	(1 << 26)
+#define   GEN6_BLITTER_COMMAND_PARSER_MASTER_ERROR	(1 << 25)
+#define   GEN6_BLITTER_SYNC_STATUS			(1 << 24)
+#define   GEN6_BLITTER_USER_INTERRUPT			(1 << 22)
+/*
+ * BSD (bit stream decoder) instruction and interrupt control register defines
+ * (G4X and Ironlake only)
+ */
+
+#define BSD_RING_TAIL          0x04030
+#define BSD_RING_HEAD          0x04034
+#define BSD_RING_START         0x04038
+#define BSD_RING_CTL           0x0403c
+#define BSD_RING_ACTHD         0x04074
+#define BSD_HWS_PGA            0x04080
 
 /*
  * Framebuffer compression (915+ only)
@@ -805,6 +836,10 @@
 #define DCC_CHANNEL_XOR_DISABLE				(1 << 10)
 #define DCC_CHANNEL_XOR_BIT_17				(1 << 9)
 
+/** Pineview MCH register contains DDR3 setting */
+#define CSHRDDR3CTL            0x101a8
+#define CSHRDDR3CTL_DDR3       (1 << 2)
+
 /** 965 MCH register controlling DRAM channel configuration */
 #define C0DRB3			0x10206
 #define C1DRB3			0x10606
@@ -826,6 +861,12 @@
 #define CLKCFG_MEM_800					(3 << 4)
 #define CLKCFG_MEM_MASK					(7 << 4)
 
+#define TR1			0x11006
+#define TSFS			0x11020
+#define   TSFS_SLOPE_MASK	0x0000ff00
+#define   TSFS_SLOPE_SHIFT	8
+#define   TSFS_INTR_MASK	0x000000ff
+
 #define CRSTANDVID		0x11100
 #define PXVFREQ_BASE		0x11110 /* P[0-15]VIDFREQ (0x1114c) (Ironlake) */
 #define   PXVFREQ_PX_MASK	0x7f000000
@@ -964,6 +1005,41 @@
 #define   MEMSTAT_SRC_CTL_STDBY 3
 #define RCPREVBSYTUPAVG		0x113b8
 #define RCPREVBSYTDNAVG		0x113bc
+#define SDEW			0x1124c
+#define CSIEW0			0x11250
+#define CSIEW1			0x11254
+#define CSIEW2			0x11258
+#define PEW			0x1125c
+#define DEW			0x11270
+#define MCHAFE			0x112c0
+#define CSIEC			0x112e0
+#define DMIEC			0x112e4
+#define DDREC			0x112e8
+#define PEG0EC			0x112ec
+#define PEG1EC			0x112f0
+#define GFXEC			0x112f4
+#define RPPREVBSYTUPAVG		0x113b8
+#define RPPREVBSYTDNAVG		0x113bc
+#define ECR			0x11600
+#define   ECR_GPFE		(1<<31)
+#define   ECR_IMONE		(1<<30)
+#define   ECR_CAP_MASK		0x0000001f /* Event range, 0-31 */
+#define OGW0			0x11608
+#define OGW1			0x1160c
+#define EG0			0x11610
+#define EG1			0x11614
+#define EG2			0x11618
+#define EG3			0x1161c
+#define EG4			0x11620
+#define EG5			0x11624
+#define EG6			0x11628
+#define EG7			0x1162c
+#define PXW			0x11664
+#define PXWL			0x11680
+#define LCFUSE02		0x116c0
+#define   LCFUSE_HIV_MASK	0x000000ff
+#define CSIPLL0			0x12c10
+#define DDRMPLL1		0x12c20
 #define PEG_BAND_GAP_DATA	0x14d68
 
 /*
@@ -1055,7 +1131,6 @@
 #define CRT_HOTPLUG_DETECT_VOLTAGE_325MV	(0 << 2)
 #define CRT_HOTPLUG_DETECT_VOLTAGE_475MV	(1 << 2)
 #define CRT_HOTPLUG_MASK			(0x3fc) /* Bits 9-2 */
-#define CRT_FORCE_HOTPLUG_MASK			0xfffffe1f
 
 #define PORT_HOTPLUG_STAT	0x61114
 #define   HDMIB_HOTPLUG_INT_STATUS		(1 << 29)
@@ -2355,6 +2430,8 @@
 #define GT_PIPE_NOTIFY		(1 << 4)
 #define GT_SYNC_STATUS          (1 << 2)
 #define GT_USER_INTERRUPT       (1 << 0)
+#define GT_BSD_USER_INTERRUPT   (1 << 5)
 
 #define GTISR   0x44010
 #define GTIMR   0x44014
@@ -2690,6 +2767,9 @@
 #define  SDVO_ENCODING          (0)
 #define  TMDS_ENCODING          (2 << 10)
 #define  NULL_PACKET_VSYNC_ENABLE       (1 << 9)
+/* CPT */
+#define  HDMI_MODE_SELECT	(1 << 9)
+#define  DVI_MODE_SELECT	(0)
 #define  SDVOB_BORDER_ENABLE    (1 << 7)
 #define  AUDIO_ENABLE           (1 << 6)
 #define  VSYNC_ACTIVE_HIGH      (1 << 4)
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 9e4c45f..fab2176 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -53,23 +53,6 @@
 		      __entry->obj, __entry->gtt_offset)
 );
 
-TRACE_EVENT(i915_gem_object_clflush,
-
-	    TP_PROTO(struct drm_gem_object *obj),
-
-	    TP_ARGS(obj),
-
-	    TP_STRUCT__entry(
-			     __field(struct drm_gem_object *, obj)
-			     ),
-
-	    TP_fast_assign(
-			   __entry->obj = obj;
-			   ),
-
-	    TP_printk("obj=%p", __entry->obj)
-);
-
 TRACE_EVENT(i915_gem_object_change_domain,
 
 	    TP_PROTO(struct drm_gem_object *obj, uint32_t old_read_domains, uint32_t old_write_domain),
@@ -132,6 +115,13 @@
 	    TP_printk("obj=%p", __entry->obj)
 );
 
+DEFINE_EVENT(i915_gem_object, i915_gem_object_clflush,
+
+	    TP_PROTO(struct drm_gem_object *obj),
+
+	    TP_ARGS(obj)
+);
+
 DEFINE_EVENT(i915_gem_object, i915_gem_object_unbind,
 
 	    TP_PROTO(struct drm_gem_object *obj),
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 4c748d8..96f75d7 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -95,6 +95,16 @@
 	panel_fixed_mode->clock = dvo_timing->clock * 10;
 	panel_fixed_mode->type = DRM_MODE_TYPE_PREFERRED;
 
+	if (dvo_timing->hsync_positive)
+		panel_fixed_mode->flags |= DRM_MODE_FLAG_PHSYNC;
+	else
+		panel_fixed_mode->flags |= DRM_MODE_FLAG_NHSYNC;
+
+	if (dvo_timing->vsync_positive)
+		panel_fixed_mode->flags |= DRM_MODE_FLAG_PVSYNC;
+	else
+		panel_fixed_mode->flags |= DRM_MODE_FLAG_NVSYNC;
+
 	/* Some VBTs have bogus h/vtotal values */
 	if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal)
 		panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1;
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index e16ac5a..22ff384 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -217,7 +217,8 @@
 {
 	struct drm_device *dev = connector->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 hotplug_en;
+	u32 hotplug_en, orig, stat;
+	bool ret = false;
 	int i, tries = 0;
 
 	if (HAS_PCH_SPLIT(dev))
@@ -232,8 +233,8 @@
 		tries = 2;
 	else
 		tries = 1;
-	hotplug_en = I915_READ(PORT_HOTPLUG_EN);
-	hotplug_en &= CRT_FORCE_HOTPLUG_MASK;
+	hotplug_en = orig = I915_READ(PORT_HOTPLUG_EN);
+	hotplug_en &= CRT_HOTPLUG_MASK;
 	hotplug_en |= CRT_HOTPLUG_FORCE_DETECT;
 
 	if (IS_G4X(dev))
@@ -255,11 +256,17 @@
 		} while (time_after(timeout, jiffies));
 	}
 
-	if ((I915_READ(PORT_HOTPLUG_STAT) & CRT_HOTPLUG_MONITOR_MASK) !=
-	    CRT_HOTPLUG_MONITOR_NONE)
-		return true;
+	stat = I915_READ(PORT_HOTPLUG_STAT);
+	if ((stat & CRT_HOTPLUG_MONITOR_MASK) != CRT_HOTPLUG_MONITOR_NONE)
+		ret = true;
 
-	return false;
+	/* clear the interrupt we just generated, if any */
+	I915_WRITE(PORT_HOTPLUG_STAT, CRT_HOTPLUG_INT_STATUS);
+
+	/* and put the bits back */
+	I915_WRITE(PORT_HOTPLUG_EN, orig);
+
+	return ret;
 }
 
 static bool intel_crt_detect_ddc(struct drm_encoder *encoder)
@@ -569,7 +576,7 @@
 				   (1 << INTEL_ANALOG_CLONE_BIT) |
 				   (1 << INTEL_SDVO_LVDS_CLONE_BIT);
 	intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
-	connector->interlace_allowed = 0;
+	connector->interlace_allowed = 1;
 	connector->doublescan_allowed = 0;
 
 	drm_encoder_helper_add(&intel_encoder->enc, &intel_crt_helper_funcs);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index f469a84..88a1ab7 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1029,19 +1029,28 @@
 void i8xx_disable_fbc(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long timeout = jiffies + msecs_to_jiffies(1);
 	u32 fbc_ctl;
 
 	if (!I915_HAS_FBC(dev))
 		return;
 
+	if (!(I915_READ(FBC_CONTROL) & FBC_CTL_EN))
+		return;	/* Already off, just return */
+
 	/* Disable compression */
 	fbc_ctl = I915_READ(FBC_CONTROL);
 	fbc_ctl &= ~FBC_CTL_EN;
 	I915_WRITE(FBC_CONTROL, fbc_ctl);
 
 	/* Wait for compressing bit to clear */
-	while (I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING)
-		; /* nothing */
+	while (I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) {
+		if (time_after(jiffies, timeout)) {
+			DRM_DEBUG_DRIVER("FBC idle timed out\n");
+			break;
+		}
+	}
 
 	intel_wait_for_vblank(dev);
 
@@ -1239,10 +1248,11 @@
 	return;
 
 out_disable:
-	DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
 	/* Multiple disables should be harmless */
-	if (intel_fbc_enabled(dev))
+	if (intel_fbc_enabled(dev)) {
+		DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
 		intel_disable_fbc(dev);
+	}
 }
 
 static int
@@ -1386,7 +1396,8 @@
 	Start = obj_priv->gtt_offset;
 	Offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8);
 
-	DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d\n", Start, Offset, x, y);
+	DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
+		      Start, Offset, x, y, crtc->fb->pitch);
 	I915_WRITE(dspstride, crtc->fb->pitch);
 	if (IS_I965G(dev)) {
 		I915_WRITE(dspbase, Offset);
@@ -2345,6 +2356,8 @@
 		if (mode->clock * 3 > 27000 * 4)
 			return MODE_CLOCK_HIGH;
 	}
+
+	drm_mode_set_crtcinfo(adjusted_mode, 0);
 	return true;
 }
 
@@ -2629,6 +2642,7 @@
 
 struct cxsr_latency {
 	int is_desktop;
+	int is_ddr3;
 	unsigned long fsb_freq;
 	unsigned long mem_freq;
 	unsigned long display_sr;
@@ -2638,33 +2652,45 @@
 };
 
 static struct cxsr_latency cxsr_latency_table[] = {
-	{1, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
-	{1, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
-	{1, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
+	{1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
+	{1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
+	{1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
+	{1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
+	{1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */
 
-	{1, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
-	{1, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
-	{1, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
+	{1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
+	{1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
+	{1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
+	{1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
+	{1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */
 
-	{1, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
-	{1, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
-	{1, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
+	{1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
+	{1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
+	{1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
+	{1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
+	{1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */
 
-	{0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
-	{0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
-	{0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
+	{0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
+	{0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
+	{0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
+	{0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
+	{0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */
 
-	{0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
-	{0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
-	{0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
+	{0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
+	{0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
+	{0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
+	{0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
+	{0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */
 
-	{0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
-	{0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
-	{0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
+	{0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
+	{0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
+	{0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
+	{0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
+	{0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
 };
 
-static struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, int fsb,
-						   int mem)
+static struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, int is_ddr3,
+						   int fsb, int mem)
 {
 	int i;
 	struct cxsr_latency *latency;
@@ -2675,6 +2701,7 @@
 	for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
 		latency = &cxsr_latency_table[i];
 		if (is_desktop == latency->is_desktop &&
+		    is_ddr3 == latency->is_ddr3 &&
 		    fsb == latency->fsb_freq && mem == latency->mem_freq)
 			return latency;
 	}
@@ -2789,8 +2816,8 @@
 	struct cxsr_latency *latency;
 	int sr_clock;
 
-	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->fsb_freq,
-					 dev_priv->mem_freq);
+	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
+					 dev_priv->fsb_freq, dev_priv->mem_freq);
 	if (!latency) {
 		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
 		pineview_disable_cxsr(dev);
@@ -3772,6 +3799,18 @@
 		}
 	}
 
+	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
+		pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
+		/* the chip adds 2 halflines automatically */
+		adjusted_mode->crtc_vdisplay -= 1;
+		adjusted_mode->crtc_vtotal -= 1;
+		adjusted_mode->crtc_vblank_start -= 1;
+		adjusted_mode->crtc_vblank_end -= 1;
+		adjusted_mode->crtc_vsync_end -= 1;
+		adjusted_mode->crtc_vsync_start -= 1;
+	} else {
+		/* progressive */
+		pipeconf &= ~PIPECONF_INTERLACE_W_FIELD_INDICATION;
+	}
+
 	I915_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
 		   ((adjusted_mode->crtc_htotal - 1) << 16));
 	I915_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
@@ -4436,6 +4475,8 @@
 
 	mutex_lock(&dev->struct_mutex);
 
+	i915_update_gfx_val(dev_priv);
+
 	if (IS_I945G(dev) || IS_I945GM(dev)) {
 		DRM_DEBUG_DRIVER("enable memory self refresh on 945\n");
 		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
@@ -4564,12 +4605,6 @@
 	spin_lock_irqsave(&dev->event_lock, flags);
 	work = intel_crtc->unpin_work;
 	if (work == NULL || !work->pending) {
-		if (work && !work->pending) {
-			obj_priv = to_intel_bo(work->pending_flip_obj);
-			DRM_DEBUG_DRIVER("flip finish: %p (%d) not pending?\n",
-					 obj_priv,
-					 atomic_read(&obj_priv->pending_flip));
-		}
 		spin_unlock_irqrestore(&dev->event_lock, flags);
 		return;
 	}
@@ -4629,14 +4664,11 @@
 	unsigned long flags;
 	int pipesrc_reg = (intel_crtc->pipe == 0) ? PIPEASRC : PIPEBSRC;
 	int ret, pipesrc;
-	RING_LOCALS;
 
 	work = kzalloc(sizeof *work, GFP_KERNEL);
 	if (work == NULL)
 		return -ENOMEM;
 
-	mutex_lock(&dev->struct_mutex);
-
 	work->event = event;
 	work->dev = crtc->dev;
 	intel_fb = to_intel_framebuffer(crtc->fb);
@@ -4646,10 +4678,10 @@
 	/* We borrow the event spin lock for protecting unpin_work */
 	spin_lock_irqsave(&dev->event_lock, flags);
 	if (intel_crtc->unpin_work) {
-		DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
 		spin_unlock_irqrestore(&dev->event_lock, flags);
 		kfree(work);
-		mutex_unlock(&dev->struct_mutex);
+
+		DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
 		return -EBUSY;
 	}
 	intel_crtc->unpin_work = work;
@@ -4658,13 +4690,19 @@
 	intel_fb = to_intel_framebuffer(fb);
 	obj = intel_fb->obj;
 
+	mutex_lock(&dev->struct_mutex);
 	ret = intel_pin_and_fence_fb_obj(dev, obj);
 	if (ret != 0) {
-		DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n",
-			  to_intel_bo(obj));
-		kfree(work);
-		intel_crtc->unpin_work = NULL;
 		mutex_unlock(&dev->struct_mutex);
+
+		spin_lock_irqsave(&dev->event_lock, flags);
+		intel_crtc->unpin_work = NULL;
+		spin_unlock_irqrestore(&dev->event_lock, flags);
+
+		kfree(work);
+
+		DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n",
+				 to_intel_bo(obj));
 		return ret;
 	}
 
@@ -5023,10 +5061,32 @@
 	return NULL;
 }
 
+bool ironlake_set_drps(struct drm_device *dev, u8 val)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u16 rgvswctl;
+
+	rgvswctl = I915_READ16(MEMSWCTL);
+	if (rgvswctl & MEMCTL_CMD_STS) {
+		DRM_DEBUG("gpu busy, RCS change rejected\n");
+		return false; /* still busy with another command */
+	}
+
+	rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
+		(val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
+	I915_WRITE16(MEMSWCTL, rgvswctl);
+	POSTING_READ16(MEMSWCTL);
+
+	rgvswctl |= MEMCTL_CMD_STS;
+	I915_WRITE16(MEMSWCTL, rgvswctl);
+
+	return true;
+}
+
 void ironlake_enable_drps(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 rgvmodectl = I915_READ(MEMMODECTL), rgvswctl;
+	u32 rgvmodectl = I915_READ(MEMMODECTL);
 	u8 fmax, fmin, fstart, vstart;
 	int i = 0;
 
@@ -5045,13 +5105,21 @@
 	fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
 	fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
 		MEMMODE_FSTART_SHIFT;
+	fstart = fmax;
+
 	vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
 		PXVFREQ_PX_SHIFT;
 
-	dev_priv->max_delay = fstart; /* can't go to fmax w/o IPS */
+	dev_priv->fmax = fstart; /* IPS callback will increase this */
+	dev_priv->fstart = fstart;
+
+	dev_priv->max_delay = fmax;
 	dev_priv->min_delay = fmin;
 	dev_priv->cur_delay = fstart;
 
+	DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n", fmax, fmin,
+			 fstart);
+
 	I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
 
 	/*
@@ -5073,20 +5141,19 @@
 	}
 	msleep(1);
 
-	rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
-		(fstart << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
-	I915_WRITE(MEMSWCTL, rgvswctl);
-	POSTING_READ(MEMSWCTL);
+	ironlake_set_drps(dev, fstart);
 
-	rgvswctl |= MEMCTL_CMD_STS;
-	I915_WRITE(MEMSWCTL, rgvswctl);
+	dev_priv->last_count1 = I915_READ(DMIEC) + I915_READ(DDREC) +
+		I915_READ(CSIEC);
+	dev_priv->last_time1 = jiffies_to_msecs(jiffies);
+	dev_priv->last_count2 = I915_READ(GFXEC);
+	getrawmonotonic(&dev_priv->last_time2);
 }
 
 void ironlake_disable_drps(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 rgvswctl;
-	u8 fstart;
+	u16 rgvswctl = I915_READ16(MEMSWCTL);
 
 	/* Ack interrupts, disable EFC interrupt */
 	I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
@@ -5096,11 +5163,7 @@
 	I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
 
 	/* Go back to the starting frequency */
-	fstart = (I915_READ(MEMMODECTL) & MEMMODE_FSTART_MASK) >>
-		MEMMODE_FSTART_SHIFT;
-	rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
-		(fstart << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
-	I915_WRITE(MEMSWCTL, rgvswctl);
+	ironlake_set_drps(dev, dev_priv->fstart);
 	msleep(1);
 	rgvswctl |= MEMCTL_CMD_STS;
 	I915_WRITE(MEMSWCTL, rgvswctl);
@@ -5108,6 +5171,92 @@
 
 }
 
+static unsigned long intel_pxfreq(u32 vidfreq)
+{
+	unsigned long freq;
+	int div = (vidfreq & 0x3f0000) >> 16;
+	int post = (vidfreq & 0x3000) >> 12;
+	int pre = (vidfreq & 0x7);
+
+	if (!pre)
+		return 0;
+
+	freq = ((div * 133333) / ((1<<post) * pre));
+
+	return freq;
+}
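+
+/*
+ * Worked example (illustrative register value, editorial note): vidfreq =
+ * 0x00121001 decodes to div = 0x12, post = 1, pre = 1, so intel_pxfreq()
+ * returns (18 * 133333) / ((1 << 1) * 1) = 1199997, roughly 1.2 GHz
+ * against the 133.333 MHz reference clock.
+ */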
+
+void intel_init_emon(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 lcfuse;
+	u8 pxw[16];
+	int i;
+
+	/* Disable to program */
+	I915_WRITE(ECR, 0);
+	POSTING_READ(ECR);
+
+	/* Program energy weights for various events */
+	I915_WRITE(SDEW, 0x15040d00);
+	I915_WRITE(CSIEW0, 0x007f0000);
+	I915_WRITE(CSIEW1, 0x1e220004);
+	I915_WRITE(CSIEW2, 0x04000004);
+
+	for (i = 0; i < 5; i++)
+		I915_WRITE(PEW + (i * 4), 0);
+	for (i = 0; i < 3; i++)
+		I915_WRITE(DEW + (i * 4), 0);
+
+	/* Program P-state weights to account for frequency power adjustment */
+	for (i = 0; i < 16; i++) {
+		u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
+		unsigned long freq = intel_pxfreq(pxvidfreq);
+		unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
+			PXVFREQ_PX_SHIFT;
+		unsigned long val;
+
+		val = vid * vid;
+		val *= (freq / 1000);
+		val *= 255;
+		val /= (127*127*900);
+		if (val > 0xff)
+			DRM_ERROR("bad pxval: %ld\n", val);
+		pxw[i] = val;
+	}
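+
+	/*
+	 * Sanity check (editorial note): at the assumed maxima vid = 127
+	 * and freq = 900000 kHz, val = 127*127*900*255 / (127*127*900)
+	 * = 255, so the weight scale tops out exactly at 0xff.
+	 */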
+	/* Render standby states get 0 weight */
+	pxw[14] = 0;
+	pxw[15] = 0;
+
+	for (i = 0; i < 4; i++) {
+		u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
+			(pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
+		I915_WRITE(PXW + (i * 4), val);
+	}
+
+	/* Adjust magic regs to magic values (more experimental results) */
+	I915_WRITE(OGW0, 0);
+	I915_WRITE(OGW1, 0);
+	I915_WRITE(EG0, 0x00007f00);
+	I915_WRITE(EG1, 0x0000000e);
+	I915_WRITE(EG2, 0x000e0000);
+	I915_WRITE(EG3, 0x68000300);
+	I915_WRITE(EG4, 0x42000000);
+	I915_WRITE(EG5, 0x00140031);
+	I915_WRITE(EG6, 0);
+	I915_WRITE(EG7, 0);
+
+	for (i = 0; i < 8; i++)
+		I915_WRITE(PXWL + (i * 4), 0);
+
+	/* Enable PMON + select events */
+	I915_WRITE(ECR, 0x80000019);
+
+	lcfuse = I915_READ(LCFUSE02);
+
+	dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
+}
+
 void intel_init_clock_gating(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -5277,11 +5426,13 @@
 			dev_priv->display.update_wm = NULL;
 	} else if (IS_PINEVIEW(dev)) {
 		if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
+					    dev_priv->is_ddr3,
 					    dev_priv->fsb_freq,
 					    dev_priv->mem_freq)) {
 			DRM_INFO("failed to find known CxSR latency "
-				 "(found fsb freq %d, mem freq %d), "
+				 "(found ddr%s fsb freq %d, mem freq %d), "
 				 "disabling CxSR\n",
+				 (dev_priv->is_ddr3 == 1) ? "3" : "2",
 				 dev_priv->fsb_freq, dev_priv->mem_freq);
 			/* Disable CxSR and never update its watermark again */
 			pineview_disable_cxsr(dev);
@@ -5354,8 +5505,10 @@
 
 	intel_init_clock_gating(dev);
 
-	if (IS_IRONLAKE_M(dev))
+	if (IS_IRONLAKE_M(dev)) {
 		ironlake_enable_drps(dev);
+		intel_init_emon(dev);
+	}
 
 	INIT_WORK(&dev_priv->idle_work, intel_idle_update);
 	setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 6b1c9a2..49b54f0 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -576,7 +576,7 @@
 		struct intel_encoder *intel_encoder;
 		struct intel_dp_priv *dp_priv;
 
-		if (!encoder || encoder->crtc != crtc)
+		if (encoder->crtc != crtc)
 			continue;
 
 		intel_encoder = enc_to_intel_encoder(encoder);
@@ -675,10 +675,9 @@
 	dp_priv->link_configuration[1] = dp_priv->lane_count;
 
 	/*
-	 * Check for DPCD version > 1.1,
-	 * enable enahanced frame stuff in that case
+	 * Check for DPCD version > 1.1 and enhanced framing support
 	 */
-	if (dp_priv->dpcd[0] >= 0x11) {
+	if (dp_priv->dpcd[0] >= 0x11 &&
+	    (dp_priv->dpcd[2] & DP_ENHANCED_FRAME_CAP)) {
 		dp_priv->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
 		dp_priv->DP |= DP_ENHANCED_FRAMING;
 	}
@@ -1208,6 +1207,8 @@
 		if (dp_priv->dpcd[0] != 0)
 			status = connector_status_connected;
 	}
+	DRM_DEBUG_KMS("DPCD: %hx%hx%hx%hx\n", dp_priv->dpcd[0],
+		      dp_priv->dpcd[1], dp_priv->dpcd[2], dp_priv->dpcd[3]);
 	return status;
 }
 
@@ -1352,7 +1353,7 @@
 	struct intel_encoder *intel_encoder = NULL;
 
 	list_for_each_entry(encoder, &mode_config->encoder_list, head) {
-		if (!encoder || encoder->crtc != crtc)
+		if (encoder->crtc != crtc)
 			continue;
 
 		intel_encoder = enc_to_intel_encoder(encoder);
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 6f53cf7..f8c76e6 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -105,7 +105,11 @@
 	}
 
 	/* Flush everything out, we'll be doing GTT only from now on */
-	i915_gem_object_set_to_gtt_domain(fbo, 1);
+	ret = i915_gem_object_set_to_gtt_domain(fbo, 1);
+	if (ret) {
+		DRM_ERROR("failed to bind fb: %d.\n", ret);
+		goto out_unpin;
+	}
 
 	info = framebuffer_alloc(0, device);
 	if (!info) {
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 65727f0..83bd764 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -59,8 +59,11 @@
 		SDVO_VSYNC_ACTIVE_HIGH |
 		SDVO_HSYNC_ACTIVE_HIGH;
 
-	if (hdmi_priv->has_hdmi_sink)
+	if (hdmi_priv->has_hdmi_sink) {
 		sdvox |= SDVO_AUDIO_ENABLE;
+		if (HAS_PCH_CPT(dev))
+			sdvox |= HDMI_MODE_SELECT;
+	}
 
 	if (intel_crtc->pipe == 1) {
 		if (HAS_PCH_CPT(dev))
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index b0e17b0..d7ad513 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -211,9 +211,8 @@
 static int intel_overlay_on(struct intel_overlay *overlay)
 {
 	struct drm_device *dev = overlay->dev;
-        drm_i915_private_t *dev_priv = dev->dev_private;
 	int ret;
-	RING_LOCALS;
+	drm_i915_private_t *dev_priv = dev->dev_private;
 
 	BUG_ON(overlay->active);
 
@@ -227,11 +226,13 @@
 	OUT_RING(MI_NOOP);
 	ADVANCE_LP_RING();
 
-	overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+	overlay->last_flip_req =
+		i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
 	if (overlay->last_flip_req == 0)
 		return -ENOMEM;
 
-	ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
+	ret = i915_do_wait_request(dev,
+			overlay->last_flip_req, 1, &dev_priv->render_ring);
 	if (ret != 0)
 		return ret;
 
@@ -248,7 +249,6 @@
         drm_i915_private_t *dev_priv = dev->dev_private;
 	u32 flip_addr = overlay->flip_addr;
 	u32 tmp;
-	RING_LOCALS;
 
 	BUG_ON(!overlay->active);
 
@@ -265,7 +265,8 @@
 	OUT_RING(flip_addr);
         ADVANCE_LP_RING();
 
-	overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+	overlay->last_flip_req =
+		i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
 }
 
 static int intel_overlay_wait_flip(struct intel_overlay *overlay)
@@ -274,10 +275,10 @@
         drm_i915_private_t *dev_priv = dev->dev_private;
 	int ret;
 	u32 tmp;
-	RING_LOCALS;
 
 	if (overlay->last_flip_req != 0) {
-		ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
+		ret = i915_do_wait_request(dev, overlay->last_flip_req,
+				1, &dev_priv->render_ring);
 		if (ret == 0) {
 			overlay->last_flip_req = 0;
 
@@ -296,11 +297,13 @@
         OUT_RING(MI_NOOP);
         ADVANCE_LP_RING();
 
-	overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+	overlay->last_flip_req =
+		i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
 	if (overlay->last_flip_req == 0)
 		return -ENOMEM;
 
-	ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
+	ret = i915_do_wait_request(dev, overlay->last_flip_req,
+			1, &dev_priv->render_ring);
 	if (ret != 0)
 		return ret;
 
@@ -314,9 +317,8 @@
 {
 	u32 flip_addr = overlay->flip_addr;
 	struct drm_device *dev = overlay->dev;
-        drm_i915_private_t *dev_priv = dev->dev_private;
+	drm_i915_private_t *dev_priv = dev->dev_private;
 	int ret;
-	RING_LOCALS;
 
 	BUG_ON(!overlay->active);
 
@@ -336,11 +338,13 @@
         OUT_RING(MI_NOOP);
         ADVANCE_LP_RING();
 
-	overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+	overlay->last_flip_req =
+		i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
 	if (overlay->last_flip_req == 0)
 		return -ENOMEM;
 
-	ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
+	ret = i915_do_wait_request(dev, overlay->last_flip_req,
+			1, &dev_priv->render_ring);
 	if (ret != 0)
 		return ret;
 
@@ -354,11 +358,13 @@
         OUT_RING(MI_NOOP);
 	ADVANCE_LP_RING();
 
-	overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+	overlay->last_flip_req =
+		i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
 	if (overlay->last_flip_req == 0)
 		return -ENOMEM;
 
-	ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
+	ret = i915_do_wait_request(dev, overlay->last_flip_req,
+			1, &dev_priv->render_ring);
 	if (ret != 0)
 		return ret;
 
@@ -390,22 +396,23 @@
 					 int interruptible)
 {
 	struct drm_device *dev = overlay->dev;
-        drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_gem_object *obj;
+	drm_i915_private_t *dev_priv = dev->dev_private;
 	u32 flip_addr;
 	int ret;
-	RING_LOCALS;
 
 	if (overlay->hw_wedged == HW_WEDGED)
 		return -EIO;
 
 	if (overlay->last_flip_req == 0) {
-		overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+		overlay->last_flip_req =
+			i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
 		if (overlay->last_flip_req == 0)
 			return -ENOMEM;
 	}
 
-	ret = i915_do_wait_request(dev, overlay->last_flip_req, interruptible);
+	ret = i915_do_wait_request(dev, overlay->last_flip_req,
+			interruptible, &dev_priv->render_ring);
 	if (ret != 0)
 		return ret;
 
@@ -429,12 +436,13 @@
 			OUT_RING(MI_NOOP);
 			ADVANCE_LP_RING();
 
-			overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+			overlay->last_flip_req = i915_add_request(dev, NULL,
+					0, &dev_priv->render_ring);
 			if (overlay->last_flip_req == 0)
 				return -ENOMEM;
 
 			ret = i915_do_wait_request(dev, overlay->last_flip_req,
-					interruptible);
+					interruptible, &dev_priv->render_ring);
 			if (ret != 0)
 				return ret;
 
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
new file mode 100644
index 0000000..cea4f1a
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -0,0 +1,849 @@
+/*
+ * Copyright © 2008-2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Eric Anholt <eric@anholt.net>
+ *    Zou Nan hai <nanhai.zou@intel.com>
+ *    Xiang Hai hao <haihao.xiang@intel.com>
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drv.h"
+#include "i915_drm.h"
+#include "i915_trace.h"
+
+static void
+render_ring_flush(struct drm_device *dev,
+		struct intel_ring_buffer *ring,
+		u32	invalidate_domains,
+		u32	flush_domains)
+{
+	u32 cmd;
+
+#if WATCH_EXEC
+	DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
+		  invalidate_domains, flush_domains);
+#endif
+	trace_i915_gem_request_flush(dev, ring->next_seqno,
+				     invalidate_domains, flush_domains);
+
+	if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
+		/*
+		 * read/write caches:
+		 *
+		 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
+		 * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
+		 * also flushed at 2d versus 3d pipeline switches.
+		 *
+		 * read-only caches:
+		 *
+		 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
+		 * MI_READ_FLUSH is set, and is always flushed on 965.
+		 *
+		 * I915_GEM_DOMAIN_COMMAND may not exist?
+		 *
+		 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
+		 * invalidated when MI_EXE_FLUSH is set.
+		 *
+		 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
+		 * invalidated with every MI_FLUSH.
+		 *
+		 * TLBs:
+		 *
+		 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
+		 * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and
+		 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
+		 * are flushed at any MI_FLUSH.
+		 */
+
+		cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
+		if ((invalidate_domains|flush_domains) &
+		    I915_GEM_DOMAIN_RENDER)
+			cmd &= ~MI_NO_WRITE_FLUSH;
+		if (!IS_I965G(dev)) {
+			/*
+			 * On the 965, the sampler cache always gets flushed
+			 * and this bit is reserved.
+			 */
+			if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
+				cmd |= MI_READ_FLUSH;
+		}
+		if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
+			cmd |= MI_EXE_FLUSH;
+
+#if WATCH_EXEC
+		DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
+#endif
+		intel_ring_begin(dev, ring, 8);
+		intel_ring_emit(dev, ring, cmd);
+		intel_ring_emit(dev, ring, MI_NOOP);
+		intel_ring_advance(dev, ring);
+	}
+}
+
+static unsigned int render_ring_get_head(struct drm_device *dev,
+		struct intel_ring_buffer *ring)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	return I915_READ(PRB0_HEAD) & HEAD_ADDR;
+}
+
+static unsigned int render_ring_get_tail(struct drm_device *dev,
+		struct intel_ring_buffer *ring)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	return I915_READ(PRB0_TAIL) & TAIL_ADDR;
+}
+
+static unsigned int render_ring_get_active_head(struct drm_device *dev,
+		struct intel_ring_buffer *ring)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
+
+	return I915_READ(acthd_reg);
+}
+
+static void render_ring_advance_ring(struct drm_device *dev,
+		struct intel_ring_buffer *ring)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	I915_WRITE(PRB0_TAIL, ring->tail);
+}
+
+static int init_ring_common(struct drm_device *dev,
+		struct intel_ring_buffer *ring)
+{
+	u32 head;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj_priv;
+	obj_priv = to_intel_bo(ring->gem_object);
+
+	/* Stop the ring if it's running. */
+	I915_WRITE(ring->regs.ctl, 0);
+	I915_WRITE(ring->regs.head, 0);
+	I915_WRITE(ring->regs.tail, 0);
+
+	/* Initialize the ring. */
+	I915_WRITE(ring->regs.start, obj_priv->gtt_offset);
+	head = ring->get_head(dev, ring);
+
+	/* G45 ring initialization fails to reset head to zero */
+	if (head != 0) {
+		DRM_ERROR("%s head not reset to zero "
+				"ctl %08x head %08x tail %08x start %08x\n",
+				ring->name,
+				I915_READ(ring->regs.ctl),
+				I915_READ(ring->regs.head),
+				I915_READ(ring->regs.tail),
+				I915_READ(ring->regs.start));
+
+		I915_WRITE(ring->regs.head, 0);
+
+		DRM_ERROR("%s head forced to zero "
+				"ctl %08x head %08x tail %08x start %08x\n",
+				ring->name,
+				I915_READ(ring->regs.ctl),
+				I915_READ(ring->regs.head),
+				I915_READ(ring->regs.tail),
+				I915_READ(ring->regs.start));
+	}
+
+	I915_WRITE(ring->regs.ctl,
+			((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES)
+			| RING_NO_REPORT | RING_VALID);
+
+	head = I915_READ(ring->regs.head) & HEAD_ADDR;
+	/* If the head is still not zero, the ring is dead */
+	if (head != 0) {
+		DRM_ERROR("%s initialization failed "
+				"ctl %08x head %08x tail %08x start %08x\n",
+				ring->name,
+				I915_READ(ring->regs.ctl),
+				I915_READ(ring->regs.head),
+				I915_READ(ring->regs.tail),
+				I915_READ(ring->regs.start));
+		return -EIO;
+	}
+
+	if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
+		i915_kernel_lost_context(dev);
+	} else {
+		ring->head = ring->get_head(dev, ring);
+		ring->tail = ring->get_tail(dev, ring);
+		ring->space = ring->head - (ring->tail + 8);
+		if (ring->space < 0)
+			ring->space += ring->size;
+	}
+	return 0;
+}
+
+static int init_render_ring(struct drm_device *dev,
+		struct intel_ring_buffer *ring)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	int ret = init_ring_common(dev, ring);
+	if (IS_I9XX(dev) && !IS_GEN3(dev)) {
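+		/* the high word of MI_MODE write-enables the low word */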
+		I915_WRITE(MI_MODE,
+				(VS_TIMER_DISPATCH) << 16 | VS_TIMER_DISPATCH);
+	}
+	return ret;
+}
+
+#define PIPE_CONTROL_FLUSH(addr)					\
+do {									\
+	OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |		\
+		 PIPE_CONTROL_DEPTH_STALL | 2);				\
+	OUT_RING(addr | PIPE_CONTROL_GLOBAL_GTT);			\
+	OUT_RING(0);							\
+	OUT_RING(0);							\
+} while (0)
+
+/**
+ * Creates a new sequence number, emitting a write of it to the status page
+ * plus an interrupt, which will trigger i915_user_interrupt_handler.
+ *
+ * Must be called with struct_lock held.
+ *
+ * Returned sequence numbers are nonzero on success.
+ */
+static u32
+render_ring_add_request(struct drm_device *dev,
+		struct intel_ring_buffer *ring,
+		struct drm_file *file_priv,
+		u32 flush_domains)
+{
+	u32 seqno;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	seqno = intel_ring_get_seqno(dev, ring);
+
+	if (IS_GEN6(dev)) {
+		BEGIN_LP_RING(6);
+		OUT_RING(GFX_OP_PIPE_CONTROL | 3);
+		OUT_RING(PIPE_CONTROL_QW_WRITE |
+			 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_IS_FLUSH |
+			 PIPE_CONTROL_NOTIFY);
+		OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
+		OUT_RING(seqno);
+		OUT_RING(0);
+		OUT_RING(0);
+		ADVANCE_LP_RING();
+	} else if (HAS_PIPE_CONTROL(dev)) {
+		u32 scratch_addr = dev_priv->seqno_gfx_addr + 128;
+
+		/*
+		 * Workaround qword write incoherence by flushing the
+		 * PIPE_NOTIFY buffers out to memory before requesting
+		 * an interrupt.
+		 */
+		BEGIN_LP_RING(32);
+		OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
+			 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
+		OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
+		OUT_RING(seqno);
+		OUT_RING(0);
+		PIPE_CONTROL_FLUSH(scratch_addr);
+		scratch_addr += 128; /* write to separate cachelines */
+		PIPE_CONTROL_FLUSH(scratch_addr);
+		scratch_addr += 128;
+		PIPE_CONTROL_FLUSH(scratch_addr);
+		scratch_addr += 128;
+		PIPE_CONTROL_FLUSH(scratch_addr);
+		scratch_addr += 128;
+		PIPE_CONTROL_FLUSH(scratch_addr);
+		scratch_addr += 128;
+		PIPE_CONTROL_FLUSH(scratch_addr);
+		OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
+			 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
+			 PIPE_CONTROL_NOTIFY);
+		OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
+		OUT_RING(seqno);
+		OUT_RING(0);
+		ADVANCE_LP_RING();
+	} else {
+		BEGIN_LP_RING(4);
+		OUT_RING(MI_STORE_DWORD_INDEX);
+		OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+		OUT_RING(seqno);
+
+		OUT_RING(MI_USER_INTERRUPT);
+		ADVANCE_LP_RING();
+	}
+	return seqno;
+}
+
+static u32
+render_ring_get_gem_seqno(struct drm_device *dev,
+		struct intel_ring_buffer *ring)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	if (HAS_PIPE_CONTROL(dev))
+		return ((volatile u32 *)(dev_priv->seqno_page))[0];
+	else
+		return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+}
+
+static void
+render_ring_get_user_irq(struct drm_device *dev,
+		struct intel_ring_buffer *ring)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+	if (dev->irq_enabled && (++ring->user_irq_refcount == 1)) {
+		if (HAS_PCH_SPLIT(dev))
+			ironlake_enable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
+		else
+			i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
+	}
+	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+}
+
+static void
+render_ring_put_user_irq(struct drm_device *dev,
+		struct intel_ring_buffer *ring)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+	BUG_ON(dev->irq_enabled && ring->user_irq_refcount <= 0);
+	if (dev->irq_enabled && (--ring->user_irq_refcount == 0)) {
+		if (HAS_PCH_SPLIT(dev))
+			ironlake_disable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
+		else
+			i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
+	}
+	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+}
+
+static void render_setup_status_page(struct drm_device *dev,
+	struct	intel_ring_buffer *ring)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	if (IS_GEN6(dev)) {
+		I915_WRITE(HWS_PGA_GEN6, ring->status_page.gfx_addr);
+		I915_READ(HWS_PGA_GEN6); /* posting read */
+	} else {
+		I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
+		I915_READ(HWS_PGA); /* posting read */
+	}
+
+}
+
+static void
+bsd_ring_flush(struct drm_device *dev,
+		struct intel_ring_buffer *ring,
+		u32     invalidate_domains,
+		u32     flush_domains)
+{
+	intel_ring_begin(dev, ring, 8);
+	intel_ring_emit(dev, ring, MI_FLUSH);
+	intel_ring_emit(dev, ring, MI_NOOP);
+	intel_ring_advance(dev, ring);
+}
+
+static inline unsigned int bsd_ring_get_head(struct drm_device *dev,
+		struct intel_ring_buffer *ring)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	return I915_READ(BSD_RING_HEAD) & HEAD_ADDR;
+}
+
+static inline unsigned int bsd_ring_get_tail(struct drm_device *dev,
+		struct intel_ring_buffer *ring)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	return I915_READ(BSD_RING_TAIL) & TAIL_ADDR;
+}
+
+static inline unsigned int bsd_ring_get_active_head(struct drm_device *dev,
+		struct intel_ring_buffer *ring)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	return I915_READ(BSD_RING_ACTHD);
+}
+
+static inline void bsd_ring_advance_ring(struct drm_device *dev,
+		struct intel_ring_buffer *ring)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	I915_WRITE(BSD_RING_TAIL, ring->tail);
+}
+
+static int init_bsd_ring(struct drm_device *dev,
+		struct intel_ring_buffer *ring)
+{
+	return init_ring_common(dev, ring);
+}
+
+static u32
+bsd_ring_add_request(struct drm_device *dev,
+		struct intel_ring_buffer *ring,
+		struct drm_file *file_priv,
+		u32 flush_domains)
+{
+	u32 seqno;
+	seqno = intel_ring_get_seqno(dev, ring);
+	intel_ring_begin(dev, ring, 16);
+	intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX);
+	intel_ring_emit(dev, ring,
+			I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+	intel_ring_emit(dev, ring, seqno);
+	intel_ring_emit(dev, ring, MI_USER_INTERRUPT);
+	intel_ring_advance(dev, ring);
+
+	DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);
+
+	return seqno;
+}
+
+static void bsd_setup_status_page(struct drm_device *dev,
+		struct  intel_ring_buffer *ring)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	I915_WRITE(BSD_HWS_PGA, ring->status_page.gfx_addr);
+	I915_READ(BSD_HWS_PGA);
+}
+
+static void
+bsd_ring_get_user_irq(struct drm_device *dev,
+		struct intel_ring_buffer *ring)
+{
+	/* do nothing */
+}
+
+static void
+bsd_ring_put_user_irq(struct drm_device *dev,
+		struct intel_ring_buffer *ring)
+{
+	/* do nothing */
+}
+
+static u32
+bsd_ring_get_gem_seqno(struct drm_device *dev,
+		struct intel_ring_buffer *ring)
+{
+	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+}
+
+static int
+bsd_ring_dispatch_gem_execbuffer(struct drm_device *dev,
+		struct intel_ring_buffer *ring,
+		struct drm_i915_gem_execbuffer2 *exec,
+		struct drm_clip_rect *cliprects,
+		uint64_t exec_offset)
+{
+	uint32_t exec_start;
+	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
+	intel_ring_begin(dev, ring, 8);
+	intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START |
+			(2 << 6) | MI_BATCH_NON_SECURE_I965);
+	intel_ring_emit(dev, ring, exec_start);
+	intel_ring_advance(dev, ring);
+	return 0;
+}
+
+static int
+render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
+		struct intel_ring_buffer *ring,
+		struct drm_i915_gem_execbuffer2 *exec,
+		struct drm_clip_rect *cliprects,
+		uint64_t exec_offset)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	int nbox = exec->num_cliprects;
+	int i = 0, count;
+	uint32_t exec_start, exec_len;
+	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
+	exec_len = (uint32_t) exec->batch_len;
+
+	trace_i915_gem_request_submit(dev, dev_priv->mm.next_gem_seqno + 1);
+
+	count = nbox ? nbox : 1;
+
+	for (i = 0; i < count; i++) {
+		if (i < nbox) {
+			int ret = i915_emit_box(dev, cliprects, i,
+						exec->DR1, exec->DR4);
+			if (ret)
+				return ret;
+		}
+
+		if (IS_I830(dev) || IS_845G(dev)) {
+			intel_ring_begin(dev, ring, 16);
+			intel_ring_emit(dev, ring, MI_BATCH_BUFFER);
+			intel_ring_emit(dev, ring,
+					exec_start | MI_BATCH_NON_SECURE);
+			intel_ring_emit(dev, ring, exec_start + exec_len - 4);
+			intel_ring_emit(dev, ring, 0);
+		} else {
+			intel_ring_begin(dev, ring, 8);
+			if (IS_I965G(dev)) {
+				intel_ring_emit(dev, ring,
+						MI_BATCH_BUFFER_START | (2 << 6)
+						| MI_BATCH_NON_SECURE_I965);
+				intel_ring_emit(dev, ring, exec_start);
+			} else {
+				intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START
+						| (2 << 6));
+				intel_ring_emit(dev, ring, exec_start |
+						MI_BATCH_NON_SECURE);
+			}
+		}
+		intel_ring_advance(dev, ring);
+	}
+
+	/* XXX breadcrumb */
+	return 0;
+}
+
+static void cleanup_status_page(struct drm_device *dev,
+		struct intel_ring_buffer *ring)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_gem_object *obj;
+	struct drm_i915_gem_object *obj_priv;
+
+	obj = ring->status_page.obj;
+	if (obj == NULL)
+		return;
+	obj_priv = to_intel_bo(obj);
+
+	kunmap(obj_priv->pages[0]);
+	i915_gem_object_unpin(obj);
+	drm_gem_object_unreference(obj);
+	ring->status_page.obj = NULL;
+
+	memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
+}
+
+static int init_status_page(struct drm_device *dev,
+		struct intel_ring_buffer *ring)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_gem_object *obj;
+	struct drm_i915_gem_object *obj_priv;
+	int ret;
+
+	obj = i915_gem_alloc_object(dev, 4096);
+	if (obj == NULL) {
+		DRM_ERROR("Failed to allocate status page\n");
+		ret = -ENOMEM;
+		goto err;
+	}
+	obj_priv = to_intel_bo(obj);
+	obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
+
+	ret = i915_gem_object_pin(obj, 4096);
+	if (ret != 0)
+		goto err_unref;
+
+	ring->status_page.gfx_addr = obj_priv->gtt_offset;
+	ring->status_page.page_addr = kmap(obj_priv->pages[0]);
+	if (ring->status_page.page_addr == NULL) {
+		memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
+		ret = -ENOMEM;
+		goto err_unpin;
+	}
+	ring->status_page.obj = obj;
+	memset(ring->status_page.page_addr, 0, PAGE_SIZE);
+
+	ring->setup_status_page(dev, ring);
+	DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
+			ring->name, ring->status_page.gfx_addr);
+
+	return 0;
+
+err_unpin:
+	i915_gem_object_unpin(obj);
+err_unref:
+	drm_gem_object_unreference(obj);
+err:
+	return ret;
+}
+
+int intel_init_ring_buffer(struct drm_device *dev,
+		struct intel_ring_buffer *ring)
+{
+	int ret;
+	struct drm_i915_gem_object *obj_priv;
+	struct drm_gem_object *obj;
+	ring->dev = dev;
+
+	if (I915_NEED_GFX_HWS(dev)) {
+		ret = init_status_page(dev, ring);
+		if (ret)
+			return ret;
+	}
+
+	obj = i915_gem_alloc_object(dev, ring->size);
+	if (obj == NULL) {
+		DRM_ERROR("Failed to allocate ringbuffer\n");
+		ret = -ENOMEM;
+		goto cleanup;
+	}
+
+	ring->gem_object = obj;
+
+	ret = i915_gem_object_pin(obj, ring->alignment);
+	if (ret != 0) {
+		drm_gem_object_unreference(obj);
+		goto cleanup;
+	}
+
+	obj_priv = to_intel_bo(obj);
+	ring->map.size = ring->size;
+	ring->map.offset = dev->agp->base + obj_priv->gtt_offset;
+	ring->map.type = 0;
+	ring->map.flags = 0;
+	ring->map.mtrr = 0;
+
+	drm_core_ioremap_wc(&ring->map, dev);
+	if (ring->map.handle == NULL) {
+		DRM_ERROR("Failed to map ringbuffer.\n");
+		i915_gem_object_unpin(obj);
+		drm_gem_object_unreference(obj);
+		ret = -EINVAL;
+		goto cleanup;
+	}
+
+	ring->virtual_start = ring->map.handle;
+	ret = ring->init(dev, ring);
+	if (ret != 0) {
+		intel_cleanup_ring_buffer(dev, ring);
+		return ret;
+	}
+
+	if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
+		i915_kernel_lost_context(dev);
+	} else {
+		ring->head = ring->get_head(dev, ring);
+		ring->tail = ring->get_tail(dev, ring);
+		ring->space = ring->head - (ring->tail + 8);
+		if (ring->space < 0)
+			ring->space += ring->size;
+	}
+	INIT_LIST_HEAD(&ring->active_list);
+	INIT_LIST_HEAD(&ring->request_list);
+	return ret;
+cleanup:
+	cleanup_status_page(dev, ring);
+	return ret;
+}
+
+void intel_cleanup_ring_buffer(struct drm_device *dev,
+		struct intel_ring_buffer *ring)
+{
+	if (ring->gem_object == NULL)
+		return;
+
+	drm_core_ioremapfree(&ring->map, dev);
+
+	i915_gem_object_unpin(ring->gem_object);
+	drm_gem_object_unreference(ring->gem_object);
+	ring->gem_object = NULL;
+	cleanup_status_page(dev, ring);
+}
+
+int intel_wrap_ring_buffer(struct drm_device *dev,
+		struct intel_ring_buffer *ring)
+{
+	unsigned int *virt;
+	int rem;
+	rem = ring->size - ring->tail;
+
+	if (ring->space < rem) {
+		int ret = intel_wait_ring_buffer(dev, ring, rem);
+		if (ret)
+			return ret;
+	}
+
+	virt = (unsigned int *)(ring->virtual_start + ring->tail);
+	rem /= 4;
+	while (rem--)
+		*virt++ = MI_NOOP;
+
+	ring->tail = 0;
+
+	return 0;
+}
+
+int intel_wait_ring_buffer(struct drm_device *dev,
+		struct intel_ring_buffer *ring, int n)
+{
+	unsigned long end;
+
+	trace_i915_ring_wait_begin(dev);
+	end = jiffies + 3 * HZ;
+	do {
+		ring->head = ring->get_head(dev, ring);
+		ring->space = ring->head - (ring->tail + 8);
+		if (ring->space < 0)
+			ring->space += ring->size;
+		if (ring->space >= n) {
+			trace_i915_ring_wait_end(dev);
+			return 0;
+		}
+
+		if (dev->primary->master) {
+			struct drm_i915_master_private *master_priv =
+				dev->primary->master->driver_priv;
+			if (master_priv->sarea_priv)
+				master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
+		}
+
+		yield();
+	} while (!time_after(jiffies, end));
+	trace_i915_ring_wait_end(dev);
+	return -EBUSY;
+}
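+
+/*
+ * Note on the space computation above (editorial note): the ring is a
+ * power-of-two circular buffer and an 8-byte gap is kept between tail
+ * and head, so head == tail always means "empty" rather than being
+ * ambiguous with "full".  E.g. with size = 4096, head = 16 and
+ * tail = 4088: space = 16 - (4088 + 8) = -4080, and adding the ring
+ * size back gives 16 bytes free.
+ */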
+
+void intel_ring_begin(struct drm_device *dev,
+		struct intel_ring_buffer *ring, int n)
+{
+	if (unlikely(ring->tail + n > ring->size))
+		intel_wrap_ring_buffer(dev, ring);
+	if (unlikely(ring->space < n))
+		intel_wait_ring_buffer(dev, ring, n);
+}
+
+void intel_ring_emit(struct drm_device *dev,
+		struct intel_ring_buffer *ring, unsigned int data)
+{
+	unsigned int *virt = ring->virtual_start + ring->tail;
+	*virt = data;
+	ring->tail += 4;
+	ring->tail &= ring->size - 1;
+	ring->space -= 4;
+}
+
+void intel_ring_advance(struct drm_device *dev,
+		struct intel_ring_buffer *ring)
+{
+	ring->advance_ring(dev, ring);
+}
+
+void intel_fill_struct(struct drm_device *dev,
+		struct intel_ring_buffer *ring,
+		void *data,
+		unsigned int len)
+{
+	unsigned int *virt = ring->virtual_start + ring->tail;
+	BUG_ON((len & (4 - 1)) != 0);	/* must be a multiple of four bytes */
+	intel_ring_begin(dev, ring, len);
+	memcpy(virt, data, len);
+	ring->tail += len;
+	ring->tail &= ring->size - 1;
+	ring->space -= len;
+	intel_ring_advance(dev, ring);
+}
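+
+/*
+ * Hypothetical usage sketch (editorial note, not part of this patch):
+ * copying a dword-aligned block of commands in one call instead of
+ * repeated intel_ring_emit() calls:
+ *
+ *	u32 cmds[2] = { MI_FLUSH, MI_NOOP };
+ *
+ *	intel_fill_struct(dev, ring, cmds, sizeof(cmds));
+ */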
+
+u32 intel_ring_get_seqno(struct drm_device *dev,
+		struct intel_ring_buffer *ring)
+{
+	u32 seqno;
+	seqno = ring->next_seqno;
+
+	/* reserve 0 for non-seqno */
+	if (++ring->next_seqno == 0)
+		ring->next_seqno = 1;
+	return seqno;
+}
+
+struct intel_ring_buffer render_ring = {
+	.name			= "render ring",
+	.regs                   = {
+		.ctl = PRB0_CTL,
+		.head = PRB0_HEAD,
+		.tail = PRB0_TAIL,
+		.start = PRB0_START
+	},
+	.ring_flag		= I915_EXEC_RENDER,
+	.size			= 32 * PAGE_SIZE,
+	.alignment		= PAGE_SIZE,
+	.virtual_start		= NULL,
+	.dev			= NULL,
+	.gem_object		= NULL,
+	.head			= 0,
+	.tail			= 0,
+	.space			= 0,
+	.next_seqno		= 1,
+	.user_irq_refcount	= 0,
+	.irq_gem_seqno		= 0,
+	.waiting_gem_seqno	= 0,
+	.setup_status_page	= render_setup_status_page,
+	.init			= init_render_ring,
+	.get_head		= render_ring_get_head,
+	.get_tail		= render_ring_get_tail,
+	.get_active_head	= render_ring_get_active_head,
+	.advance_ring		= render_ring_advance_ring,
+	.flush			= render_ring_flush,
+	.add_request		= render_ring_add_request,
+	.get_gem_seqno		= render_ring_get_gem_seqno,
+	.user_irq_get		= render_ring_get_user_irq,
+	.user_irq_put		= render_ring_put_user_irq,
+	.dispatch_gem_execbuffer = render_ring_dispatch_gem_execbuffer,
+	.status_page		= {NULL, 0, NULL},
+	.map			= {0,}
+};
+
+/* ring buffer for bit-stream decoder */
+
+struct intel_ring_buffer bsd_ring = {
+	.name                   = "bsd ring",
+	.regs			= {
+		.ctl = BSD_RING_CTL,
+		.head = BSD_RING_HEAD,
+		.tail = BSD_RING_TAIL,
+		.start = BSD_RING_START
+	},
+	.ring_flag		= I915_EXEC_BSD,
+	.size			= 32 * PAGE_SIZE,
+	.alignment		= PAGE_SIZE,
+	.virtual_start		= NULL,
+	.dev			= NULL,
+	.gem_object		= NULL,
+	.head			= 0,
+	.tail			= 0,
+	.space			= 0,
+	.next_seqno		= 1,
+	.user_irq_refcount	= 0,
+	.irq_gem_seqno		= 0,
+	.waiting_gem_seqno	= 0,
+	.setup_status_page	= bsd_setup_status_page,
+	.init			= init_bsd_ring,
+	.get_head		= bsd_ring_get_head,
+	.get_tail		= bsd_ring_get_tail,
+	.get_active_head	= bsd_ring_get_active_head,
+	.advance_ring		= bsd_ring_advance_ring,
+	.flush			= bsd_ring_flush,
+	.add_request		= bsd_ring_add_request,
+	.get_gem_seqno		= bsd_ring_get_gem_seqno,
+	.user_irq_get		= bsd_ring_get_user_irq,
+	.user_irq_put		= bsd_ring_put_user_irq,
+	.dispatch_gem_execbuffer = bsd_ring_dispatch_gem_execbuffer,
+	.status_page		= {NULL, 0, NULL},
+	.map			= {0,}
+};
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
new file mode 100644
index 0000000..d5568d3
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -0,0 +1,124 @@
+#ifndef _INTEL_RINGBUFFER_H_
+#define _INTEL_RINGBUFFER_H_
+
+struct  intel_hw_status_page {
+	void		*page_addr;
+	unsigned int	gfx_addr;
+	struct		drm_gem_object *obj;
+};
+
+struct drm_i915_gem_execbuffer2;
+struct  intel_ring_buffer {
+	const char	*name;
+	struct		ring_regs {
+			u32 ctl;
+			u32 head;
+			u32 tail;
+			u32 start;
+	} regs;
+	unsigned int	ring_flag;
+	unsigned long	size;
+	unsigned int	alignment;
+	void		*virtual_start;
+	struct		drm_device *dev;
+	struct		drm_gem_object *gem_object;
+
+	unsigned int	head;
+	unsigned int	tail;
+	unsigned int	space;
+	u32		next_seqno;
+	struct intel_hw_status_page status_page;
+
+	u32		irq_gem_seqno;		/* last seq seen at irq time */
+	u32		waiting_gem_seqno;
+	int		user_irq_refcount;
+	void		(*user_irq_get)(struct drm_device *dev,
+			struct intel_ring_buffer *ring);
+	void		(*user_irq_put)(struct drm_device *dev,
+			struct intel_ring_buffer *ring);
+	void		(*setup_status_page)(struct drm_device *dev,
+			struct	intel_ring_buffer *ring);
+
+	int		(*init)(struct drm_device *dev,
+			struct intel_ring_buffer *ring);
+
+	unsigned int	(*get_head)(struct drm_device *dev,
+			struct intel_ring_buffer *ring);
+	unsigned int	(*get_tail)(struct drm_device *dev,
+			struct intel_ring_buffer *ring);
+	unsigned int	(*get_active_head)(struct drm_device *dev,
+			struct intel_ring_buffer *ring);
+	void		(*advance_ring)(struct drm_device *dev,
+			struct intel_ring_buffer *ring);
+	void		(*flush)(struct drm_device *dev,
+			struct intel_ring_buffer *ring,
+			u32	invalidate_domains,
+			u32	flush_domains);
+	u32		(*add_request)(struct drm_device *dev,
+			struct intel_ring_buffer *ring,
+			struct drm_file *file_priv,
+			u32 flush_domains);
+	u32		(*get_gem_seqno)(struct drm_device *dev,
+			struct intel_ring_buffer *ring);
+	int		(*dispatch_gem_execbuffer)(struct drm_device *dev,
+			struct intel_ring_buffer *ring,
+			struct drm_i915_gem_execbuffer2 *exec,
+			struct drm_clip_rect *cliprects,
+			uint64_t exec_offset);
+
+	/**
+	 * List of objects currently involved in rendering from the
+	 * ringbuffer.
+	 *
+	 * Includes buffers having the contents of their GPU caches
+	 * flushed, not necessarily primitives.  last_rendering_seqno
+	 * represents when the rendering involved will be completed.
+	 *
+	 * A reference is held on the buffer while on this list.
+	 */
+	struct list_head active_list;
+
+	/**
+	 * List of breadcrumbs associated with GPU requests currently
+	 * outstanding.
+	 */
+	struct list_head request_list;
+
+	wait_queue_head_t irq_queue;
+	drm_local_map_t map;
+};
+
+static inline u32
+intel_read_status_page(struct intel_ring_buffer *ring,
+		int reg)
+{
+	u32 *regs = ring->status_page.page_addr;
+	return regs[reg];
+}
+
+int intel_init_ring_buffer(struct drm_device *dev,
+		struct intel_ring_buffer *ring);
+void intel_cleanup_ring_buffer(struct drm_device *dev,
+		struct intel_ring_buffer *ring);
+int intel_wait_ring_buffer(struct drm_device *dev,
+		struct intel_ring_buffer *ring, int n);
+int intel_wrap_ring_buffer(struct drm_device *dev,
+		struct intel_ring_buffer *ring);
+void intel_ring_begin(struct drm_device *dev,
+		struct intel_ring_buffer *ring, int n);
+void intel_ring_emit(struct drm_device *dev,
+		struct intel_ring_buffer *ring, u32 data);
+void intel_fill_struct(struct drm_device *dev,
+		struct intel_ring_buffer *ring,
+		void *data,
+		unsigned int len);
+void intel_ring_advance(struct drm_device *dev,
+		struct intel_ring_buffer *ring);
+
+u32 intel_ring_get_seqno(struct drm_device *dev,
+		struct intel_ring_buffer *ring);
+
+extern struct intel_ring_buffer render_ring;
+extern struct intel_ring_buffer bsd_ring;
+
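+/*
+ * Hypothetical usage sketch (editorial note, not part of this patch):
+ * intel_ring_begin() reserves space in bytes before dwords are
+ * emitted, e.g. for a two-dword flush:
+ *
+ *	intel_ring_begin(dev, ring, 8);
+ *	intel_ring_emit(dev, ring, MI_FLUSH);
+ *	intel_ring_emit(dev, ring, MI_NOOP);
+ *	intel_ring_advance(dev, ring);
+ */
+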
+#endif /* _INTEL_RINGBUFFER_H_ */
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index aba72c4..76993ac 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1479,7 +1479,7 @@
 		intel_encoder = enc_to_intel_encoder(encoder);
 		if (intel_encoder->type == INTEL_OUTPUT_ANALOG) {
 			list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-				if (connector && encoder == intel_attached_encoder(connector))
+				if (encoder == intel_attached_encoder(connector))
 					return connector;
 			}
 		}
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index b64a8d7..7f0028e 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -275,6 +275,7 @@
 #define I915_PARAM_HAS_OVERLAY           7
 #define I915_PARAM_HAS_PAGEFLIPPING	 8
 #define I915_PARAM_HAS_EXECBUF2          9
+#define I915_PARAM_HAS_BSD		 10
 
 typedef struct drm_i915_getparam {
 	int param;
@@ -616,7 +617,9 @@
 	__u32 num_cliprects;
 	/** This is a struct drm_clip_rect *cliprects */
 	__u64 cliprects_ptr;
-	__u64 flags; /* currently unused */
+#define I915_EXEC_RENDER                 (1<<0)
+#define I915_EXEC_BSD                    (1<<1)
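+/*
+ * Hypothetical userspace usage (illustrative): set flags to
+ * I915_EXEC_BSD and submit via DRM_IOCTL_I915_GEM_EXECBUFFER2 to
+ * target the bit-stream decoder ring.
+ */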
+	__u64 flags;
 	__u64 rsvd1;
 	__u64 rsvd2;
 };