Merge remote branch 'nouveau/drm-nouveau-next' of ../drm-nouveau-next into drm-core-next

* 'nouveau/drm-nouveau-next' of ../drm-nouveau-next: (93 commits)
  drm/nv50: fix a couple of vm init issues
  drm/nv04-nv40: Fix up PCI(E) GART DMA object bus address calculation.
  drm/nouveau: kick vram functions out into an "engine"
  drm/nouveau: allow gpuobj vinst to be a virtual address when necessary
  drm/nv50: tidy up PCIEGART implementation
  drm/nv50: enable non-contig vram allocations where requested
  drm/nv50: enable 4KiB pages for small vram allocations
  drm/nv50: implement global channel address space on new VM code
  drm/nv50: implement BAR1/BAR3 management on top of new VM code
  drm/nv50: import new vm code
  drm/nv50: implement custom vram mm
  drm/nouveau: Avoid potential race between nouveau_fence_update() and context takedown.
  drm/nouveau: fix use of drm_mm_node in semaphore object
  drm/nouveau: wrap calls to ttm_bo_validate()
  drm/nouveau: no need to zero dma objects, we fill them completely anyway
  drm/nouveau: introduce a util function to wait on reg != val
  drm/nouveau: implicitly insert non-DMA objects into RAMHT
  drm/nouveau: make fifo.create_context() responsible for mapping control regs
  drm/nouveau: Spin for a bit in nouveau_fence_wait() before yielding the CPU.
  drm/nouveau: Use WC memory on the AGP GART.
  ...
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index 23fa82d..b1d8941 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -5,12 +5,13 @@
 ccflags-y := -Iinclude/drm
 nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
              nouveau_object.o nouveau_irq.o nouveau_notifier.o \
-             nouveau_sgdma.o nouveau_dma.o \
+             nouveau_sgdma.o nouveau_dma.o nouveau_util.o \
              nouveau_bo.o nouveau_fence.o nouveau_gem.o nouveau_ttm.o \
              nouveau_hw.o nouveau_calc.o nouveau_bios.o nouveau_i2c.o \
              nouveau_display.o nouveau_connector.o nouveau_fbcon.o \
              nouveau_dp.o nouveau_ramht.o \
 	     nouveau_pm.o nouveau_volt.o nouveau_perf.o nouveau_temp.o \
+	     nouveau_mm.o nouveau_vm.o \
              nv04_timer.o \
              nv04_mc.o nv40_mc.o nv50_mc.o \
              nv04_fb.o nv10_fb.o nv30_fb.o nv40_fb.o nv50_fb.o nvc0_fb.o \
@@ -18,14 +19,16 @@
              nv04_graph.o nv10_graph.o nv20_graph.o \
              nv40_graph.o nv50_graph.o nvc0_graph.o \
              nv40_grctx.o nv50_grctx.o \
+             nv84_crypt.o \
              nv04_instmem.o nv50_instmem.o nvc0_instmem.o \
-             nv50_crtc.o nv50_dac.o nv50_sor.o \
+             nv50_evo.o nv50_crtc.o nv50_dac.o nv50_sor.o \
              nv50_cursor.o nv50_display.o nv50_fbcon.o \
              nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o \
              nv04_crtc.o nv04_display.o nv04_cursor.o nv04_fbcon.o \
              nv10_gpio.o nv50_gpio.o \
 	     nv50_calc.o \
-	     nv04_pm.o nv50_pm.o nva3_pm.o
+	     nv04_pm.o nv50_pm.o nva3_pm.o \
+	     nv50_vram.o nv50_vm.o
 
 nouveau-$(CONFIG_DRM_NOUVEAU_DEBUG) += nouveau_debugfs.o
 nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index b229357..d304655 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -6053,52 +6053,17 @@
 	return entry;
 }
 
-static void fabricate_vga_output(struct dcb_table *dcb, int i2c, int heads)
+static void fabricate_dcb_output(struct dcb_table *dcb, int type, int i2c,
+				 int heads, int or)
 {
 	struct dcb_entry *entry = new_dcb_entry(dcb);
 
-	entry->type = 0;
+	entry->type = type;
 	entry->i2c_index = i2c;
 	entry->heads = heads;
-	entry->location = DCB_LOC_ON_CHIP;
-	entry->or = 1;
-}
-
-static void fabricate_dvi_i_output(struct dcb_table *dcb, bool twoHeads)
-{
-	struct dcb_entry *entry = new_dcb_entry(dcb);
-
-	entry->type = 2;
-	entry->i2c_index = LEGACY_I2C_PANEL;
-	entry->heads = twoHeads ? 3 : 1;
-	entry->location = !DCB_LOC_ON_CHIP;	/* ie OFF CHIP */
-	entry->or = 1;	/* means |0x10 gets set on CRE_LCD__INDEX */
-	entry->duallink_possible = false; /* SiI164 and co. are single link */
-
-#if 0
-	/*
-	 * For dvi-a either crtc probably works, but my card appears to only
-	 * support dvi-d.  "nvidia" still attempts to program it for dvi-a,
-	 * doing the full fp output setup (program 0x6808.. fp dimension regs,
-	 * setting 0x680848 to 0x10000111 to enable, maybe setting 0x680880);
-	 * the monitor picks up the mode res ok and lights up, but no pixel
-	 * data appears, so the board manufacturer probably connected up the
-	 * sync lines, but missed the video traces / components
-	 *
-	 * with this introduction, dvi-a left as an exercise for the reader.
-	 */
-	fabricate_vga_output(dcb, LEGACY_I2C_PANEL, entry->heads);
-#endif
-}
-
-static void fabricate_tv_output(struct dcb_table *dcb, bool twoHeads)
-{
-	struct dcb_entry *entry = new_dcb_entry(dcb);
-
-	entry->type = 1;
-	entry->i2c_index = LEGACY_I2C_TV;
-	entry->heads = twoHeads ? 3 : 1;
-	entry->location = !DCB_LOC_ON_CHIP;	/* ie OFF CHIP */
+	if (type != OUTPUT_ANALOG)
+		entry->location = !DCB_LOC_ON_CHIP; /* ie OFF CHIP */
+	entry->or = or;
 }
 
 static bool
@@ -6365,8 +6330,36 @@
 	return true;
 }
 
+static void
+fabricate_dcb_encoder_table(struct drm_device *dev, struct nvbios *bios)
+{
+	struct dcb_table *dcb = &bios->dcb;
+	int all_heads = (nv_two_heads(dev) ? 3 : 1);
+
+#ifdef __powerpc__
+	/* Apple iMac G4 NV17 */
+	if (of_machine_is_compatible("PowerMac4,5")) {
+		fabricate_dcb_output(dcb, OUTPUT_TMDS, 0, all_heads, 1);
+		fabricate_dcb_output(dcb, OUTPUT_ANALOG, 1, all_heads, 2);
+		return;
+	}
+#endif
+
+	/* Make up some sane defaults */
+	fabricate_dcb_output(dcb, OUTPUT_ANALOG, LEGACY_I2C_CRT, 1, 1);
+
+	if (nv04_tv_identify(dev, bios->legacy.i2c_indices.tv) >= 0)
+		fabricate_dcb_output(dcb, OUTPUT_TV, LEGACY_I2C_TV,
+				     all_heads, 0);
+
+	else if (bios->tmds.output0_script_ptr ||
+		 bios->tmds.output1_script_ptr)
+		fabricate_dcb_output(dcb, OUTPUT_TMDS, LEGACY_I2C_PANEL,
+				     all_heads, 1);
+}
+
 static int
-parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
+parse_dcb_table(struct drm_device *dev, struct nvbios *bios)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct dcb_table *dcb = &bios->dcb;
@@ -6386,12 +6379,7 @@
 
 	/* this situation likely means a really old card, pre DCB */
 	if (dcbptr == 0x0) {
-		NV_INFO(dev, "Assuming a CRT output exists\n");
-		fabricate_vga_output(dcb, LEGACY_I2C_CRT, 1);
-
-		if (nv04_tv_identify(dev, bios->legacy.i2c_indices.tv) >= 0)
-			fabricate_tv_output(dcb, twoHeads);
-
+		fabricate_dcb_encoder_table(dev, bios);
 		return 0;
 	}
 
@@ -6451,21 +6439,7 @@
 		 */
 		NV_TRACEWARN(dev, "No useful information in BIOS output table; "
 				  "adding all possible outputs\n");
-		fabricate_vga_output(dcb, LEGACY_I2C_CRT, 1);
-
-		/*
-		 * Attempt to detect TV before DVI because the test
-		 * for the former is more accurate and it rules the
-		 * latter out.
-		 */
-		if (nv04_tv_identify(dev,
-				     bios->legacy.i2c_indices.tv) >= 0)
-			fabricate_tv_output(dcb, twoHeads);
-
-		else if (bios->tmds.output0_script_ptr ||
-			 bios->tmds.output1_script_ptr)
-			fabricate_dvi_i_output(dcb, twoHeads);
-
+		fabricate_dcb_encoder_table(dev, bios);
 		return 0;
 	}
 
@@ -6859,7 +6833,7 @@
 	if (ret)
 		return ret;
 
-	ret = parse_dcb_table(dev, bios, nv_two_heads(dev));
+	ret = parse_dcb_table(dev, bios);
 	if (ret)
 		return ret;
 
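The three single-purpose fabricate_*_output() helpers collapse into one
parameterized fabricate_dcb_output().  For reference, the old call sites map
onto the new helper roughly as follows; this is a sketch inferred from the
hunks above, not part of the patch:

	/* fabricate_vga_output(dcb, i2c, heads) becomes: */
	fabricate_dcb_output(dcb, OUTPUT_ANALOG, i2c, heads, 1);

	/* fabricate_tv_output(dcb, twoHeads) becomes: */
	fabricate_dcb_output(dcb, OUTPUT_TV, LEGACY_I2C_TV,
			     twoHeads ? 3 : 1, 0);

	/* fabricate_dvi_i_output(dcb, twoHeads) becomes: */
	fabricate_dcb_output(dcb, OUTPUT_TMDS, LEGACY_I2C_PANEL,
			     twoHeads ? 3 : 1, 1);
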
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index c41e1c2..42d1ad6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -32,6 +32,8 @@
 #include "nouveau_drm.h"
 #include "nouveau_drv.h"
 #include "nouveau_dma.h"
+#include "nouveau_mm.h"
+#include "nouveau_vm.h"
 
 #include <linux/log2.h>
 #include <linux/slab.h>
@@ -46,82 +48,51 @@
 	if (unlikely(nvbo->gem))
 		DRM_ERROR("bo %p still attached to GEM object\n", bo);
 
-	if (nvbo->tile)
-		nv10_mem_expire_tiling(dev, nvbo->tile, NULL);
-
+	nv10_mem_put_tile_region(dev, nvbo->tile, NULL);
+	nouveau_vm_put(&nvbo->vma);
 	kfree(nvbo);
 }
 
 static void
-nouveau_bo_fixup_align(struct drm_device *dev,
-		       uint32_t tile_mode, uint32_t tile_flags,
-		       int *align, int *size)
+nouveau_bo_fixup_align(struct nouveau_bo *nvbo, int *align, int *size,
+		       int *page_shift)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
 
-	/*
-	 * Some of the tile_flags have a periodic structure of N*4096 bytes,
-	 * align to to that as well as the page size. Align the size to the
-	 * appropriate boundaries. This does imply that sizes are rounded up
-	 * 3-7 pages, so be aware of this and do not waste memory by allocating
-	 * many small buffers.
-	 */
-	if (dev_priv->card_type == NV_50) {
-		uint32_t block_size = dev_priv->vram_size >> 15;
-		int i;
-
-		switch (tile_flags) {
-		case 0x1800:
-		case 0x2800:
-		case 0x4800:
-		case 0x7a00:
-			if (is_power_of_2(block_size)) {
-				for (i = 1; i < 10; i++) {
-					*align = 12 * i * block_size;
-					if (!(*align % 65536))
-						break;
-				}
-			} else {
-				for (i = 1; i < 10; i++) {
-					*align = 8 * i * block_size;
-					if (!(*align % 65536))
-						break;
-				}
-			}
-			*size = roundup(*size, *align);
-			break;
-		default:
-			break;
-		}
-
-	} else {
-		if (tile_mode) {
+	if (dev_priv->card_type < NV_50) {
+		if (nvbo->tile_mode) {
 			if (dev_priv->chipset >= 0x40) {
 				*align = 65536;
-				*size = roundup(*size, 64 * tile_mode);
+				*size = roundup(*size, 64 * nvbo->tile_mode);
 
 			} else if (dev_priv->chipset >= 0x30) {
 				*align = 32768;
-				*size = roundup(*size, 64 * tile_mode);
+				*size = roundup(*size, 64 * nvbo->tile_mode);
 
 			} else if (dev_priv->chipset >= 0x20) {
 				*align = 16384;
-				*size = roundup(*size, 64 * tile_mode);
+				*size = roundup(*size, 64 * nvbo->tile_mode);
 
 			} else if (dev_priv->chipset >= 0x10) {
 				*align = 16384;
-				*size = roundup(*size, 32 * tile_mode);
+				*size = roundup(*size, 32 * nvbo->tile_mode);
 			}
 		}
+	} else {
+		if (likely(dev_priv->chan_vm)) {
+			if (*size > 256 * 1024)
+				*page_shift = dev_priv->chan_vm->lpg_shift;
+			else
+				*page_shift = dev_priv->chan_vm->spg_shift;
+		} else {
+			*page_shift = 12;
+		}
+
+		*size = roundup(*size, (1 << *page_shift));
+		*align = max((1 << *page_shift), *align);
 	}
 
-	/* ALIGN works only on powers of two. */
 	*size = roundup(*size, PAGE_SIZE);
-
-	if (dev_priv->card_type == NV_50) {
-		*size = roundup(*size, 65536);
-		*align = max(65536, *align);
-	}
 }
 
 int
@@ -132,7 +103,7 @@
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_bo *nvbo;
-	int ret = 0;
+	int ret = 0, page_shift = 0;
 
 	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
 	if (!nvbo)
@@ -145,10 +116,18 @@
 	nvbo->tile_flags = tile_flags;
 	nvbo->bo.bdev = &dev_priv->ttm.bdev;
 
-	nouveau_bo_fixup_align(dev, tile_mode, nouveau_bo_tile_layout(nvbo),
-			       &align, &size);
+	nouveau_bo_fixup_align(nvbo, &align, &size, &page_shift);
 	align >>= PAGE_SHIFT;
 
+	if (!nvbo->no_vm && dev_priv->chan_vm) {
+		ret = nouveau_vm_get(dev_priv->chan_vm, size, page_shift,
+				     NV_MEM_ACCESS_RW, &nvbo->vma);
+		if (ret) {
+			kfree(nvbo);
+			return ret;
+		}
+	}
+
 	nouveau_bo_placement_set(nvbo, flags, 0);
 
 	nvbo->channel = chan;
@@ -161,6 +140,11 @@
 	}
 	nvbo->channel = NULL;
 
+	if (nvbo->vma.node) {
+		if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
+			nvbo->bo.offset = nvbo->vma.offset;
+	}
+
 	*pnvbo = nvbo;
 	return 0;
 }
@@ -244,7 +228,7 @@
 
 	nouveau_bo_placement_set(nvbo, memtype, 0);
 
-	ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false);
+	ret = nouveau_bo_validate(nvbo, false, false, false);
 	if (ret == 0) {
 		switch (bo->mem.mem_type) {
 		case TTM_PL_VRAM:
@@ -280,7 +264,7 @@
 
 	nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);
 
-	ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false);
+	ret = nouveau_bo_validate(nvbo, false, false, false);
 	if (ret == 0) {
 		switch (bo->mem.mem_type) {
 		case TTM_PL_VRAM:
@@ -319,6 +303,25 @@
 		ttm_bo_kunmap(&nvbo->kmap);
 }
 
+int
+nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
+		    bool no_wait_reserve, bool no_wait_gpu)
+{
+	int ret;
+
+	ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, interruptible,
+			      no_wait_reserve, no_wait_gpu);
+	if (ret)
+		return ret;
+
+	if (nvbo->vma.node) {
+		if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
+			nvbo->bo.offset = nvbo->vma.offset;
+	}
+
+	return 0;
+}
+
 u16
 nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
 {
@@ -410,37 +413,40 @@
 		man->default_caching = TTM_PL_FLAG_CACHED;
 		break;
 	case TTM_PL_VRAM:
-		man->func = &ttm_bo_manager_func;
+		if (dev_priv->card_type == NV_50) {
+			man->func = &nouveau_vram_manager;
+			man->io_reserve_fastpath = false;
+			man->use_io_reserve_lru = true;
+		} else {
+			man->func = &ttm_bo_manager_func;
+		}
 		man->flags = TTM_MEMTYPE_FLAG_FIXED |
 			     TTM_MEMTYPE_FLAG_MAPPABLE;
 		man->available_caching = TTM_PL_FLAG_UNCACHED |
 					 TTM_PL_FLAG_WC;
 		man->default_caching = TTM_PL_FLAG_WC;
-		if (dev_priv->card_type == NV_50)
-			man->gpu_offset = 0x40000000;
-		else
-			man->gpu_offset = 0;
 		break;
 	case TTM_PL_TT:
 		man->func = &ttm_bo_manager_func;
 		switch (dev_priv->gart_info.type) {
 		case NOUVEAU_GART_AGP:
 			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
-			man->available_caching = TTM_PL_FLAG_UNCACHED;
-			man->default_caching = TTM_PL_FLAG_UNCACHED;
+			man->available_caching = TTM_PL_FLAG_UNCACHED |
+				TTM_PL_FLAG_WC;
+			man->default_caching = TTM_PL_FLAG_WC;
 			break;
 		case NOUVEAU_GART_SGDMA:
 			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
 				     TTM_MEMTYPE_FLAG_CMA;
 			man->available_caching = TTM_PL_MASK_CACHING;
 			man->default_caching = TTM_PL_FLAG_CACHED;
+			man->gpu_offset = dev_priv->gart_info.aper_base;
 			break;
 		default:
 			NV_ERROR(dev, "Unknown GART type: %d\n",
 				 dev_priv->gart_info.type);
 			return -EINVAL;
 		}
-		man->gpu_offset = dev_priv->vm_gart_base;
 		break;
 	default:
 		NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type);
@@ -485,16 +491,9 @@
 	if (ret)
 		return ret;
 
-	if (nvbo->channel) {
-		ret = nouveau_fence_sync(fence, nvbo->channel);
-		if (ret)
-			goto out;
-	}
-
 	ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, evict,
 					no_wait_reserve, no_wait_gpu, new_mem);
-out:
-	nouveau_fence_unref((void *)&fence);
+	nouveau_fence_unref(&fence);
 	return ret;
 }
 
@@ -529,14 +528,14 @@
 	dst_offset = new_mem->start << PAGE_SHIFT;
 	if (!nvbo->no_vm) {
 		if (old_mem->mem_type == TTM_PL_VRAM)
-			src_offset += dev_priv->vm_vram_base;
+			src_offset  = nvbo->vma.offset;
 		else
-			src_offset += dev_priv->vm_gart_base;
+			src_offset += dev_priv->gart_info.aper_base;
 
 		if (new_mem->mem_type == TTM_PL_VRAM)
-			dst_offset += dev_priv->vm_vram_base;
+			dst_offset  = nvbo->vma.offset;
 		else
-			dst_offset += dev_priv->vm_gart_base;
+			dst_offset += dev_priv->gart_info.aper_base;
 	}
 
 	ret = RING_SPACE(chan, 3);
@@ -683,17 +682,24 @@
 	int ret;
 
 	chan = nvbo->channel;
-	if (!chan || nvbo->no_vm)
+	if (!chan || nvbo->no_vm) {
 		chan = dev_priv->channel;
+		mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX);
+	}
 
 	if (dev_priv->card_type < NV_50)
 		ret = nv04_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
 	else
 		ret = nv50_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
-	if (ret)
-		return ret;
+	if (ret == 0) {
+		ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
+						    no_wait_reserve,
+						    no_wait_gpu, new_mem);
+	}
 
-	return nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+	if (chan == dev_priv->channel)
+		mutex_unlock(&chan->mutex);
+	return ret;
 }
 
 static int
@@ -771,7 +777,6 @@
 	struct drm_device *dev = dev_priv->dev;
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 	uint64_t offset;
-	int ret;
 
 	if (nvbo->no_vm || new_mem->mem_type != TTM_PL_VRAM) {
 		/* Nothing to do. */
@@ -781,18 +786,12 @@
 
 	offset = new_mem->start << PAGE_SHIFT;
 
-	if (dev_priv->card_type == NV_50) {
-		ret = nv50_mem_vm_bind_linear(dev,
-					      offset + dev_priv->vm_vram_base,
-					      new_mem->size,
-					      nouveau_bo_tile_layout(nvbo),
-					      offset);
-		if (ret)
-			return ret;
-
+	if (dev_priv->chan_vm) {
+		nouveau_vm_map(&nvbo->vma, new_mem->mm_node);
 	} else if (dev_priv->card_type >= NV_10) {
 		*new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size,
-						nvbo->tile_mode);
+						nvbo->tile_mode,
+						nvbo->tile_flags);
 	}
 
 	return 0;
@@ -808,9 +807,7 @@
 
 	if (dev_priv->card_type >= NV_10 &&
 	    dev_priv->card_type < NV_50) {
-		if (*old_tile)
-			nv10_mem_expire_tiling(dev, *old_tile, bo->sync_obj);
-
+		nv10_mem_put_tile_region(dev, *old_tile, bo->sync_obj);
 		*old_tile = new_tile;
 	}
 }
@@ -879,6 +876,7 @@
 	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
 	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
 	struct drm_device *dev = dev_priv->dev;
+	int ret;
 
 	mem->bus.addr = NULL;
 	mem->bus.offset = 0;
@@ -901,9 +899,28 @@
 #endif
 		break;
 	case TTM_PL_VRAM:
-		mem->bus.offset = mem->start << PAGE_SHIFT;
+	{
+		struct nouveau_vram *vram = mem->mm_node;
+
+		if (!dev_priv->bar1_vm) {
+			mem->bus.offset = mem->start << PAGE_SHIFT;
+			mem->bus.base = pci_resource_start(dev->pdev, 1);
+			mem->bus.is_iomem = true;
+			break;
+		}
+
+		ret = nouveau_vm_get(dev_priv->bar1_vm, mem->bus.size, 12,
+				     NV_MEM_ACCESS_RW, &vram->bar_vma);
+		if (ret)
+			return ret;
+
+		nouveau_vm_map(&vram->bar_vma, vram);
+
+		mem->bus.offset  = vram->bar_vma.offset;
+		mem->bus.offset -= 0x0020000000ULL;
 		mem->bus.base = pci_resource_start(dev->pdev, 1);
 		mem->bus.is_iomem = true;
+	}
 		break;
 	default:
 		return -EINVAL;
@@ -914,6 +935,17 @@
 static void
 nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 {
+	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
+	struct nouveau_vram *vram = mem->mm_node;
+
+	if (!dev_priv->bar1_vm || mem->mem_type != TTM_PL_VRAM)
+		return;
+
+	if (!vram->bar_vma.node)
+		return;
+
+	nouveau_vm_unmap(&vram->bar_vma);
+	nouveau_vm_put(&vram->bar_vma);
 }
 
 static int
@@ -939,7 +971,23 @@
 	nvbo->placement.fpfn = 0;
 	nvbo->placement.lpfn = dev_priv->fb_mappable_pages;
 	nouveau_bo_placement_set(nvbo, TTM_PL_VRAM, 0);
-	return ttm_bo_validate(bo, &nvbo->placement, false, true, false);
+	return nouveau_bo_validate(nvbo, false, true, false);
+}
+
+void
+nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
+{
+	struct nouveau_fence *old_fence;
+
+	if (likely(fence))
+		nouveau_fence_ref(fence);
+
+	spin_lock(&nvbo->bo.bdev->fence_lock);
+	old_fence = nvbo->bo.sync_obj;
+	nvbo->bo.sync_obj = fence;
+	spin_unlock(&nvbo->bo.bdev->fence_lock);
+
+	nouveau_fence_unref(&old_fence);
 }
 
 struct ttm_bo_driver nouveau_bo_driver = {
@@ -949,11 +997,11 @@
 	.evict_flags = nouveau_bo_evict_flags,
 	.move = nouveau_bo_move,
 	.verify_access = nouveau_bo_verify_access,
-	.sync_obj_signaled = nouveau_fence_signalled,
-	.sync_obj_wait = nouveau_fence_wait,
-	.sync_obj_flush = nouveau_fence_flush,
-	.sync_obj_unref = nouveau_fence_unref,
-	.sync_obj_ref = nouveau_fence_ref,
+	.sync_obj_signaled = __nouveau_fence_signalled,
+	.sync_obj_wait = __nouveau_fence_wait,
+	.sync_obj_flush = __nouveau_fence_flush,
+	.sync_obj_unref = __nouveau_fence_unref,
+	.sync_obj_ref = __nouveau_fence_ref,
 	.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
 	.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
 	.io_mem_free = &nouveau_ttm_io_mem_free,
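nouveau_bo_validate() is now the single place where bo.offset is brought back
in sync with the buffer's per-channel VMA after a move, so callers no longer
add dev_priv->vm_vram_base themselves.  A minimal caller sketch
(example_pin_and_query() is hypothetical, for illustration only):

	static int
	example_pin_and_query(struct nouveau_bo *nvbo, u64 *gpu_addr)
	{
		/* pin routes through nouveau_bo_validate() internally */
		int ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM);
		if (ret)
			return ret;

		/* on chipsets with chan_vm this is the VMA offset */
		*gpu_addr = nvbo->bo.offset;
		return 0;
	}
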
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index 373950e..6f37995 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -39,22 +39,22 @@
 
 	if (dev_priv->card_type >= NV_50) {
 		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
-					     dev_priv->vm_end, NV_DMA_ACCESS_RO,
-					     NV_DMA_TARGET_AGP, &pushbuf);
+					     (1ULL << 40), NV_MEM_ACCESS_RO,
+					     NV_MEM_TARGET_VM, &pushbuf);
 		chan->pushbuf_base = pb->bo.offset;
 	} else
 	if (pb->bo.mem.mem_type == TTM_PL_TT) {
-		ret = nouveau_gpuobj_gart_dma_new(chan, 0,
-						  dev_priv->gart_info.aper_size,
-						  NV_DMA_ACCESS_RO, &pushbuf,
-						  NULL);
+		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
+					     dev_priv->gart_info.aper_size,
+					     NV_MEM_ACCESS_RO,
+					     NV_MEM_TARGET_GART, &pushbuf);
 		chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
 	} else
 	if (dev_priv->card_type != NV_04) {
 		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
 					     dev_priv->fb_available_size,
-					     NV_DMA_ACCESS_RO,
-					     NV_DMA_TARGET_VIDMEM, &pushbuf);
+					     NV_MEM_ACCESS_RO,
+					     NV_MEM_TARGET_VRAM, &pushbuf);
 		chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
 	} else {
 		/* NV04 cmdbuf hack, from original ddx.. not sure of its
@@ -62,11 +62,10 @@
 		 * VRAM.
 		 */
 		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
-					     pci_resource_start(dev->pdev,
-					     1),
+					     pci_resource_start(dev->pdev, 1),
 					     dev_priv->fb_available_size,
-					     NV_DMA_ACCESS_RO,
-					     NV_DMA_TARGET_PCI, &pushbuf);
+					     NV_MEM_ACCESS_RO,
+					     NV_MEM_TARGET_PCI, &pushbuf);
 		chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
 	}
 
@@ -107,74 +106,60 @@
 int
 nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
 		      struct drm_file *file_priv,
-		      uint32_t vram_handle, uint32_t tt_handle)
+		      uint32_t vram_handle, uint32_t gart_handle)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
 	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
 	struct nouveau_channel *chan;
-	int channel, user;
+	unsigned long flags;
 	int ret;
 
-	/*
-	 * Alright, here is the full story
-	 * Nvidia cards have multiple hw fifo contexts (praise them for that,
-	 * no complicated crash-prone context switches)
-	 * We allocate a new context for each app and let it write to it
-	 * directly (woo, full userspace command submission !)
-	 * When there are no more contexts, you lost
-	 */
-	for (channel = 0; channel < pfifo->channels; channel++) {
-		if (dev_priv->fifos[channel] == NULL)
-			break;
-	}
-
-	/* no more fifos. you lost. */
-	if (channel == pfifo->channels)
-		return -EINVAL;
-
-	dev_priv->fifos[channel] = kzalloc(sizeof(struct nouveau_channel),
-					   GFP_KERNEL);
-	if (!dev_priv->fifos[channel])
+	/* allocate and lock channel structure */
+	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
+	if (!chan)
 		return -ENOMEM;
-	chan = dev_priv->fifos[channel];
-	INIT_LIST_HEAD(&chan->nvsw.vbl_wait);
-	INIT_LIST_HEAD(&chan->fence.pending);
 	chan->dev = dev;
-	chan->id = channel;
 	chan->file_priv = file_priv;
 	chan->vram_handle = vram_handle;
-	chan->gart_handle = tt_handle;
+	chan->gart_handle = gart_handle;
 
-	NV_INFO(dev, "Allocating FIFO number %d\n", channel);
+	kref_init(&chan->ref);
+	atomic_set(&chan->users, 1);
+	mutex_init(&chan->mutex);
+	mutex_lock(&chan->mutex);
+
+	/* allocate hw channel id */
+	spin_lock_irqsave(&dev_priv->channels.lock, flags);
+	for (chan->id = 0; chan->id < pfifo->channels; chan->id++) {
+		if (!dev_priv->channels.ptr[chan->id]) {
+			nouveau_channel_ref(chan, &dev_priv->channels.ptr[chan->id]);
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
+
+	if (chan->id == pfifo->channels) {
+		mutex_unlock(&chan->mutex);
+		kfree(chan);
+		return -ENODEV;
+	}
+
+	NV_DEBUG(dev, "initialising channel %d\n", chan->id);
+	INIT_LIST_HEAD(&chan->nvsw.vbl_wait);
+	INIT_LIST_HEAD(&chan->nvsw.flip);
+	INIT_LIST_HEAD(&chan->fence.pending);
 
 	/* Allocate DMA push buffer */
 	chan->pushbuf_bo = nouveau_channel_user_pushbuf_alloc(dev);
 	if (!chan->pushbuf_bo) {
 		ret = -ENOMEM;
 		NV_ERROR(dev, "pushbuf %d\n", ret);
-		nouveau_channel_free(chan);
+		nouveau_channel_put(&chan);
 		return ret;
 	}
 
 	nouveau_dma_pre_init(chan);
-
-	/* Locate channel's user control regs */
-	if (dev_priv->card_type < NV_40)
-		user = NV03_USER(channel);
-	else
-	if (dev_priv->card_type < NV_50)
-		user = NV40_USER(channel);
-	else
-		user = NV50_USER(channel);
-
-	chan->user = ioremap(pci_resource_start(dev->pdev, 0) + user,
-								PAGE_SIZE);
-	if (!chan->user) {
-		NV_ERROR(dev, "ioremap of regs failed.\n");
-		nouveau_channel_free(chan);
-		return -ENOMEM;
-	}
 	chan->user_put = 0x40;
 	chan->user_get = 0x44;
 
@@ -182,15 +167,15 @@
 	ret = nouveau_notifier_init_channel(chan);
 	if (ret) {
 		NV_ERROR(dev, "ntfy %d\n", ret);
-		nouveau_channel_free(chan);
+		nouveau_channel_put(&chan);
 		return ret;
 	}
 
 	/* Setup channel's default objects */
-	ret = nouveau_gpuobj_channel_init(chan, vram_handle, tt_handle);
+	ret = nouveau_gpuobj_channel_init(chan, vram_handle, gart_handle);
 	if (ret) {
 		NV_ERROR(dev, "gpuobj %d\n", ret);
-		nouveau_channel_free(chan);
+		nouveau_channel_put(&chan);
 		return ret;
 	}
 
@@ -198,7 +183,7 @@
 	ret = nouveau_channel_pushbuf_ctxdma_init(chan);
 	if (ret) {
 		NV_ERROR(dev, "pbctxdma %d\n", ret);
-		nouveau_channel_free(chan);
+		nouveau_channel_put(&chan);
 		return ret;
 	}
 
@@ -206,16 +191,18 @@
 	pfifo->reassign(dev, false);
 
 	/* Create a graphics context for new channel */
-	ret = pgraph->create_context(chan);
-	if (ret) {
-		nouveau_channel_free(chan);
-		return ret;
+	if (dev_priv->card_type < NV_50) {
+		ret = pgraph->create_context(chan);
+		if (ret) {
+			nouveau_channel_put(&chan);
+			return ret;
+		}
 	}
 
 	/* Construct initial RAMFC for new channel */
 	ret = pfifo->create_context(chan);
 	if (ret) {
-		nouveau_channel_free(chan);
+		nouveau_channel_put(&chan);
 		return ret;
 	}
 
@@ -225,83 +212,108 @@
 	if (!ret)
 		ret = nouveau_fence_channel_init(chan);
 	if (ret) {
-		nouveau_channel_free(chan);
+		nouveau_channel_put(&chan);
 		return ret;
 	}
 
 	nouveau_debugfs_channel_init(chan);
 
-	NV_INFO(dev, "%s: initialised FIFO %d\n", __func__, channel);
+	NV_DEBUG(dev, "channel %d initialised\n", chan->id);
 	*chan_ret = chan;
 	return 0;
 }
 
-/* stops a fifo */
-void
-nouveau_channel_free(struct nouveau_channel *chan)
+struct nouveau_channel *
+nouveau_channel_get_unlocked(struct nouveau_channel *ref)
 {
-	struct drm_device *dev = chan->dev;
+	struct nouveau_channel *chan = NULL;
+
+	if (likely(ref && atomic_inc_not_zero(&ref->users)))
+		nouveau_channel_ref(ref, &chan);
+
+	return chan;
+}
+
+struct nouveau_channel *
+nouveau_channel_get(struct drm_device *dev, struct drm_file *file_priv, int id)
+{
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
-	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+	struct nouveau_channel *chan;
 	unsigned long flags;
-	int ret;
 
-	NV_INFO(dev, "%s: freeing fifo %d\n", __func__, chan->id);
+	spin_lock_irqsave(&dev_priv->channels.lock, flags);
+	chan = nouveau_channel_get_unlocked(dev_priv->channels.ptr[id]);
+	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
 
-	nouveau_debugfs_channel_fini(chan);
+	if (unlikely(!chan))
+		return ERR_PTR(-EINVAL);
 
-	/* Give outstanding push buffers a chance to complete */
-	nouveau_fence_update(chan);
-	if (chan->fence.sequence != chan->fence.sequence_ack) {
-		struct nouveau_fence *fence = NULL;
-
-		ret = nouveau_fence_new(chan, &fence, true);
-		if (ret == 0) {
-			ret = nouveau_fence_wait(fence, NULL, false, false);
-			nouveau_fence_unref((void *)&fence);
-		}
-
-		if (ret)
-			NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id);
+	if (unlikely(file_priv && chan->file_priv != file_priv)) {
+		nouveau_channel_put_unlocked(&chan);
+		return ERR_PTR(-EINVAL);
 	}
 
-	/* Ensure all outstanding fences are signaled.  They should be if the
+	mutex_lock(&chan->mutex);
+	return chan;
+}
+
+void
+nouveau_channel_put_unlocked(struct nouveau_channel **pchan)
+{
+	struct nouveau_channel *chan = *pchan;
+	struct drm_device *dev = chan->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+	struct nouveau_crypt_engine *pcrypt = &dev_priv->engine.crypt;
+	unsigned long flags;
+
+	/* decrement the refcount; we're done if there are still refs */
+	if (likely(!atomic_dec_and_test(&chan->users))) {
+		nouveau_channel_ref(NULL, pchan);
+		return;
+	}
+
+	/* no one wants the channel anymore */
+	NV_DEBUG(dev, "freeing channel %d\n", chan->id);
+	nouveau_debugfs_channel_fini(chan);
+
+	/* give it a chance to idle */
+	nouveau_channel_idle(chan);
+
+	/* ensure all outstanding fences are signaled.  they should be if the
 	 * above attempts at idling were OK, but if we failed this'll tell TTM
 	 * we're done with the buffers.
 	 */
 	nouveau_fence_channel_fini(chan);
 
-	/* This will prevent pfifo from switching channels. */
+	/* boot it off the hardware */
 	pfifo->reassign(dev, false);
 
-	/* We want to give pgraph a chance to idle and get rid of all potential
-	 * errors. We need to do this before the lock, otherwise the irq handler
-	 * is unable to process them.
+	/* We want to give pgraph a chance to idle and get rid of all
+	 * potential errors. We need to do this without the context
+	 * switch lock held; otherwise the irq handler is unable to
+	 * process them.
 	 */
 	if (pgraph->channel(dev) == chan)
 		nouveau_wait_for_idle(dev);
 
-	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
-
-	pgraph->fifo_access(dev, false);
-	if (pgraph->channel(dev) == chan)
-		pgraph->unload_context(dev);
-	pgraph->destroy_context(chan);
-	pgraph->fifo_access(dev, true);
-
-	if (pfifo->channel_id(dev) == chan->id) {
-		pfifo->disable(dev);
-		pfifo->unload_context(dev);
-		pfifo->enable(dev);
-	}
+	/* destroy the engine specific contexts */
 	pfifo->destroy_context(chan);
+	pgraph->destroy_context(chan);
+	if (pcrypt->destroy_context)
+		pcrypt->destroy_context(chan);
 
 	pfifo->reassign(dev, true);
 
-	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+	/* aside from its resources, the channel should now be dead;
+	 * remove it from the channel list
+	 */
+	spin_lock_irqsave(&dev_priv->channels.lock, flags);
+	nouveau_channel_ref(NULL, &dev_priv->channels.ptr[chan->id]);
+	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
 
-	/* Release the channel's resources */
+	/* destroy any resources the channel owned */
 	nouveau_gpuobj_ref(NULL, &chan->pushbuf);
 	if (chan->pushbuf_bo) {
 		nouveau_bo_unmap(chan->pushbuf_bo);
@@ -310,44 +322,80 @@
 	}
 	nouveau_gpuobj_channel_takedown(chan);
 	nouveau_notifier_takedown_channel(chan);
-	if (chan->user)
-		iounmap(chan->user);
 
-	dev_priv->fifos[chan->id] = NULL;
+	nouveau_channel_ref(NULL, pchan);
+}
+
+void
+nouveau_channel_put(struct nouveau_channel **pchan)
+{
+	mutex_unlock(&(*pchan)->mutex);
+	nouveau_channel_put_unlocked(pchan);
+}
+
+static void
+nouveau_channel_del(struct kref *ref)
+{
+	struct nouveau_channel *chan =
+		container_of(ref, struct nouveau_channel, ref);
+
 	kfree(chan);
 }
 
+void
+nouveau_channel_ref(struct nouveau_channel *chan,
+		    struct nouveau_channel **pchan)
+{
+	if (chan)
+		kref_get(&chan->ref);
+
+	if (*pchan)
+		kref_put(&(*pchan)->ref, nouveau_channel_del);
+
+	*pchan = chan;
+}
+
+void
+nouveau_channel_idle(struct nouveau_channel *chan)
+{
+	struct drm_device *dev = chan->dev;
+	struct nouveau_fence *fence = NULL;
+	int ret;
+
+	nouveau_fence_update(chan);
+
+	if (chan->fence.sequence != chan->fence.sequence_ack) {
+		ret = nouveau_fence_new(chan, &fence, true);
+		if (!ret) {
+			ret = nouveau_fence_wait(fence, false, false);
+			nouveau_fence_unref(&fence);
+		}
+
+		if (ret)
+			NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id);
+	}
+}
+
 /* cleans up all the fifos from file_priv */
 void
 nouveau_channel_cleanup(struct drm_device *dev, struct drm_file *file_priv)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_engine *engine = &dev_priv->engine;
+	struct nouveau_channel *chan;
 	int i;
 
 	NV_DEBUG(dev, "clearing FIFO enables from file_priv\n");
 	for (i = 0; i < engine->fifo.channels; i++) {
-		struct nouveau_channel *chan = dev_priv->fifos[i];
+		chan = nouveau_channel_get(dev, file_priv, i);
+		if (IS_ERR(chan))
+			continue;
 
-		if (chan && chan->file_priv == file_priv)
-			nouveau_channel_free(chan);
+		atomic_dec(&chan->users);
+		nouveau_channel_put(&chan);
 	}
 }
 
-int
-nouveau_channel_owner(struct drm_device *dev, struct drm_file *file_priv,
-		      int channel)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_engine *engine = &dev_priv->engine;
-
-	if (channel >= engine->fifo.channels)
-		return 0;
-	if (dev_priv->fifos[channel] == NULL)
-		return 0;
-
-	return (dev_priv->fifos[channel]->file_priv == file_priv);
-}
 
 /***********************************
  * ioctls wrapping the functions
@@ -395,24 +443,26 @@
 	/* Named memory object area */
 	ret = drm_gem_handle_create(file_priv, chan->notifier_bo->gem,
 				    &init->notifier_handle);
-	if (ret) {
-		nouveau_channel_free(chan);
-		return ret;
-	}
 
-	return 0;
+	if (ret == 0)
+		atomic_inc(&chan->users); /* userspace reference */
+	nouveau_channel_put(&chan);
+	return ret;
 }
 
 static int
 nouveau_ioctl_fifo_free(struct drm_device *dev, void *data,
 			struct drm_file *file_priv)
 {
-	struct drm_nouveau_channel_free *cfree = data;
+	struct drm_nouveau_channel_free *req = data;
 	struct nouveau_channel *chan;
 
-	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(cfree->channel, file_priv, chan);
+	chan = nouveau_channel_get(dev, file_priv, req->channel);
+	if (IS_ERR(chan))
+		return PTR_ERR(chan);
 
-	nouveau_channel_free(chan);
+	atomic_dec(&chan->users);
+	nouveau_channel_put(&chan);
 	return 0;
 }
 
@@ -421,18 +471,18 @@
  ***********************************/
 
 struct drm_ioctl_desc nouveau_ioctls[] = {
-	DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-	DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_UNLOCKED|DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_UNLOCKED|DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_UNLOCKED|DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_UNLOCKED|DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_UNLOCKED|DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_UNLOCKED|DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_UNLOCKED|DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_UNLOCKED|DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_UNLOCKED|DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_UNLOCKED|DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_UNLOCKED|DRM_AUTH),
 };
 
 int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls);
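Channel lifetime is now split between a kref on the structure itself and a
"users" count on the hardware context, with a per-channel mutex replacing the
old NOUVEAU_GET_USER_CHANNEL_WITH_RETURN macro.  The expected ioctl-side
pattern, mirroring nouveau_ioctl_fifo_free() above (example_ioctl() is a
hypothetical illustration):

	static int
	example_ioctl(struct drm_device *dev, int id, struct drm_file *file_priv)
	{
		struct nouveau_channel *chan;

		/* takes a reference and chan->mutex, checks ownership */
		chan = nouveau_channel_get(dev, file_priv, id);
		if (IS_ERR(chan))
			return PTR_ERR(chan);

		/* ... safe to touch channel state here ... */

		nouveau_channel_put(&chan); /* drops mutex and reference */
		return 0;
	}
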
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 52c356e..a21e000 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -37,6 +37,8 @@
 #include "nouveau_connector.h"
 #include "nouveau_hw.h"
 
+static void nouveau_connector_hotplug(void *, int);
+
 static struct nouveau_encoder *
 find_encoder_by_type(struct drm_connector *connector, int type)
 {
@@ -94,22 +96,30 @@
 }
 
 static void
-nouveau_connector_destroy(struct drm_connector *drm_connector)
+nouveau_connector_destroy(struct drm_connector *connector)
 {
-	struct nouveau_connector *nv_connector =
-		nouveau_connector(drm_connector);
+	struct nouveau_connector *nv_connector = nouveau_connector(connector);
+	struct drm_nouveau_private *dev_priv;
+	struct nouveau_gpio_engine *pgpio;
 	struct drm_device *dev;
 
 	if (!nv_connector)
 		return;
 
 	dev = nv_connector->base.dev;
+	dev_priv = dev->dev_private;
 	NV_DEBUG_KMS(dev, "\n");
 
+	pgpio = &dev_priv->engine.gpio;
+	if (pgpio->irq_unregister) {
+		pgpio->irq_unregister(dev, nv_connector->dcb->gpio_tag,
+				      nouveau_connector_hotplug, connector);
+	}
+
 	kfree(nv_connector->edid);
-	drm_sysfs_connector_remove(drm_connector);
-	drm_connector_cleanup(drm_connector);
-	kfree(drm_connector);
+	drm_sysfs_connector_remove(connector);
+	drm_connector_cleanup(connector);
+	kfree(connector);
 }
 
 static struct nouveau_i2c_chan *
@@ -760,6 +770,7 @@
 {
 	const struct drm_connector_funcs *funcs = &nouveau_connector_funcs;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
 	struct nouveau_connector *nv_connector = NULL;
 	struct dcb_connector_table_entry *dcb = NULL;
 	struct drm_connector *connector;
@@ -876,6 +887,11 @@
 		break;
 	}
 
+	if (pgpio->irq_register) {
+		pgpio->irq_register(dev, nv_connector->dcb->gpio_tag,
+				    nouveau_connector_hotplug, connector);
+	}
+
 	drm_sysfs_connector_add(connector);
 	dcb->drm = connector;
 	return dcb->drm;
@@ -886,3 +902,29 @@
 	return ERR_PTR(ret);
 
 }
+
+static void
+nouveau_connector_hotplug(void *data, int plugged)
+{
+	struct drm_connector *connector = data;
+	struct drm_device *dev = connector->dev;
+
+	NV_INFO(dev, "%splugged %s\n", plugged ? "" : "un",
+		drm_get_connector_name(connector));
+
+	if (connector->encoder && connector->encoder->crtc &&
+	    connector->encoder->crtc->enabled) {
+		struct nouveau_encoder *nv_encoder = nouveau_encoder(connector->encoder);
+		struct drm_encoder_helper_funcs *helper =
+			connector->encoder->helper_private;
+
+		if (nv_encoder->dcb->type == OUTPUT_DP) {
+			if (plugged)
+				helper->dpms(connector->encoder, DRM_MODE_DPMS_ON);
+			else
+				helper->dpms(connector->encoder, DRM_MODE_DPMS_OFF);
+		}
+	}
+
+	drm_helper_hpd_irq_event(dev);
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 2e11fd6..505c6bf 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -29,6 +29,9 @@
 #include "nouveau_drv.h"
 #include "nouveau_fb.h"
 #include "nouveau_fbcon.h"
+#include "nouveau_hw.h"
+#include "nouveau_crtc.h"
+#include "nouveau_dma.h"
 
 static void
 nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb)
@@ -104,3 +107,207 @@
 	.output_poll_changed = nouveau_fbcon_output_poll_changed,
 };
 
+int
+nouveau_vblank_enable(struct drm_device *dev, int crtc)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->card_type >= NV_50)
+		nv_mask(dev, NV50_PDISPLAY_INTR_EN_1, 0,
+			NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_(crtc));
+	else
+		NVWriteCRTC(dev, crtc, NV_PCRTC_INTR_EN_0,
+			    NV_PCRTC_INTR_0_VBLANK);
+
+	return 0;
+}
+
+void
+nouveau_vblank_disable(struct drm_device *dev, int crtc)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->card_type >= NV_50)
+		nv_mask(dev, NV50_PDISPLAY_INTR_EN_1,
+			NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_(crtc), 0);
+	else
+		NVWriteCRTC(dev, crtc, NV_PCRTC_INTR_EN_0, 0);
+}
+
+static int
+nouveau_page_flip_reserve(struct nouveau_bo *old_bo,
+			  struct nouveau_bo *new_bo)
+{
+	int ret;
+
+	ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM);
+	if (ret)
+		return ret;
+
+	ret = ttm_bo_reserve(&new_bo->bo, false, false, false, 0);
+	if (ret)
+		goto fail;
+
+	ret = ttm_bo_reserve(&old_bo->bo, false, false, false, 0);
+	if (ret)
+		goto fail_unreserve;
+
+	return 0;
+
+fail_unreserve:
+	ttm_bo_unreserve(&new_bo->bo);
+fail:
+	nouveau_bo_unpin(new_bo);
+	return ret;
+}
+
+static void
+nouveau_page_flip_unreserve(struct nouveau_bo *old_bo,
+			    struct nouveau_bo *new_bo,
+			    struct nouveau_fence *fence)
+{
+	nouveau_bo_fence(new_bo, fence);
+	ttm_bo_unreserve(&new_bo->bo);
+
+	nouveau_bo_fence(old_bo, fence);
+	ttm_bo_unreserve(&old_bo->bo);
+
+	nouveau_bo_unpin(old_bo);
+}
+
+static int
+nouveau_page_flip_emit(struct nouveau_channel *chan,
+		       struct nouveau_bo *old_bo,
+		       struct nouveau_bo *new_bo,
+		       struct nouveau_page_flip_state *s,
+		       struct nouveau_fence **pfence)
+{
+	struct drm_device *dev = chan->dev;
+	unsigned long flags;
+	int ret;
+
+	/* Queue it to the pending list */
+	spin_lock_irqsave(&dev->event_lock, flags);
+	list_add_tail(&s->head, &chan->nvsw.flip);
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+
+	/* Synchronize with the old framebuffer */
+	ret = nouveau_fence_sync(old_bo->bo.sync_obj, chan);
+	if (ret)
+		goto fail;
+
+	/* Emit the pageflip */
+	ret = RING_SPACE(chan, 2);
+	if (ret)
+		goto fail;
+
+	BEGIN_RING(chan, NvSubSw, NV_SW_PAGE_FLIP, 1);
+	OUT_RING(chan, 0);
+	FIRE_RING(chan);
+
+	ret = nouveau_fence_new(chan, pfence, true);
+	if (ret)
+		goto fail;
+
+	return 0;
+fail:
+	spin_lock_irqsave(&dev->event_lock, flags);
+	list_del(&s->head);
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+	return ret;
+}
+
+int
+nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+		       struct drm_pending_vblank_event *event)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_bo *old_bo = nouveau_framebuffer(crtc->fb)->nvbo;
+	struct nouveau_bo *new_bo = nouveau_framebuffer(fb)->nvbo;
+	struct nouveau_page_flip_state *s;
+	struct nouveau_channel *chan;
+	struct nouveau_fence *fence;
+	int ret;
+
+	if (dev_priv->engine.graph.accel_blocked)
+		return -ENODEV;
+
+	s = kzalloc(sizeof(*s), GFP_KERNEL);
+	if (!s)
+		return -ENOMEM;
+
+	/* Don't let the buffers go away while we flip */
+	ret = nouveau_page_flip_reserve(old_bo, new_bo);
+	if (ret)
+		goto fail_free;
+
+	/* Initialize a page flip struct */
+	*s = (struct nouveau_page_flip_state)
+		{ { }, event, nouveau_crtc(crtc)->index,
+		  fb->bits_per_pixel, fb->pitch, crtc->x, crtc->y,
+		  new_bo->bo.offset };
+
+	/* Choose the channel the flip will be handled in */
+	chan = nouveau_fence_channel(new_bo->bo.sync_obj);
+	if (!chan)
+		chan = nouveau_channel_get_unlocked(dev_priv->channel);
+	mutex_lock(&chan->mutex);
+
+	/* Emit a page flip */
+	ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence);
+	nouveau_channel_put(&chan);
+	if (ret)
+		goto fail_unreserve;
+
+	/* Update the crtc struct and cleanup */
+	crtc->fb = fb;
+
+	nouveau_page_flip_unreserve(old_bo, new_bo, fence);
+	nouveau_fence_unref(&fence);
+	return 0;
+
+fail_unreserve:
+	nouveau_page_flip_unreserve(old_bo, new_bo, NULL);
+fail_free:
+	kfree(s);
+	return ret;
+}
+
+int
+nouveau_finish_page_flip(struct nouveau_channel *chan,
+			 struct nouveau_page_flip_state *ps)
+{
+	struct drm_device *dev = chan->dev;
+	struct nouveau_page_flip_state *s;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->event_lock, flags);
+
+	if (list_empty(&chan->nvsw.flip)) {
+		NV_ERROR(dev, "Unexpected pageflip in channel %d.\n", chan->id);
+		spin_unlock_irqrestore(&dev->event_lock, flags);
+		return -EINVAL;
+	}
+
+	s = list_first_entry(&chan->nvsw.flip,
+			     struct nouveau_page_flip_state, head);
+	if (s->event) {
+		struct drm_pending_vblank_event *e = s->event;
+		struct timeval now;
+
+		do_gettimeofday(&now);
+		e->event.sequence = 0;
+		e->event.tv_sec = now.tv_sec;
+		e->event.tv_usec = now.tv_usec;
+		list_add_tail(&e->base.link, &e->base.file_priv->event_list);
+		wake_up_interruptible(&e->base.file_priv->event_wait);
+	}
+
+	list_del(&s->head);
+	*ps = *s;
+	kfree(s);
+
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+	return 0;
+}
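The flip itself is asynchronous: nouveau_crtc_page_flip() queues a
nouveau_page_flip_state and emits NV_SW_PAGE_FLIP into the channel, and a
later completion path calls nouveau_finish_page_flip() to deliver the vblank
event.  A sketch of the completion side (example_flip_complete() is
hypothetical; the real call sites in this series are the software-method and
display interrupt handlers):

	static void
	example_flip_complete(struct nouveau_channel *chan)
	{
		struct nouveau_page_flip_state state;

		/* pops the oldest pending flip and fires its event */
		if (nouveau_finish_page_flip(chan, &state) == 0) {
			/* state now holds crtc, bpp, pitch, x/y and the
			 * new framebuffer offset for the completed flip */
		}
	}
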
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index 82581e6..6ff77ce 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -59,17 +59,11 @@
 {
 	struct drm_device *dev = chan->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_gpuobj *obj = NULL;
 	int ret, i;
 
 	/* Create NV_MEMORY_TO_MEMORY_FORMAT for buffer moves */
-	ret = nouveau_gpuobj_gr_new(chan, dev_priv->card_type < NV_50 ?
-				    0x0039 : 0x5039, &obj);
-	if (ret)
-		return ret;
-
-	ret = nouveau_ramht_insert(chan, NvM2MF, obj);
-	nouveau_gpuobj_ref(NULL, &obj);
+	ret = nouveau_gpuobj_gr_new(chan, NvM2MF, dev_priv->card_type < NV_50 ?
+				    0x0039 : 0x5039);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index 4562f30..38d5995 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -279,7 +279,7 @@
 	struct bit_displayport_encoder_table *dpe;
 	int dpe_headerlen;
 	uint8_t config[4], status[3];
-	bool cr_done, cr_max_vs, eq_done;
+	bool cr_done, cr_max_vs, eq_done, hpd_state;
 	int ret = 0, i, tries, voltage;
 
 	NV_DEBUG_KMS(dev, "link training!!\n");
@@ -297,7 +297,7 @@
 	/* disable hotplug detect, this flips around on some panels during
 	 * link training.
 	 */
-	pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, false);
+	hpd_state = pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, false);
 
 	if (dpe->script0) {
 		NV_DEBUG_KMS(dev, "SOR-%d: running DP script 0\n", nv_encoder->or);
@@ -439,7 +439,7 @@
 	}
 
 	/* re-enable hotplug detect */
-	pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, true);
+	pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, hpd_state);
 
 	return eq_done;
 }
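pgpio->irq_enable() now returns the previous state, so link training restores
whatever hotplug setting was in effect rather than unconditionally re-enabling
it.  The save/restore pattern, as used above (gpio_tag standing in for
nv_connector->dcb->gpio_tag):

	bool hpd_state = pgpio->irq_enable(dev, gpio_tag, false);
	/* ... link training, which may bounce the HPD line ... */
	pgpio->irq_enable(dev, gpio_tag, hpd_state);
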
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index 9087549..bb17057 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -115,6 +115,10 @@
 int nouveau_perflvl_wr;
 module_param_named(perflvl_wr, nouveau_perflvl_wr, int, 0400);
 
+MODULE_PARM_DESC(msi, "Enable MSI (default: off)");
+int nouveau_msi;
+module_param_named(msi, nouveau_msi, int, 0400);
+
 int nouveau_fbpercrtc;
 #if 0
 module_param_named(fbpercrtc, nouveau_fbpercrtc, int, 0400);
@@ -193,23 +197,10 @@
 
 	NV_INFO(dev, "Idling channels...\n");
 	for (i = 0; i < pfifo->channels; i++) {
-		struct nouveau_fence *fence = NULL;
+		chan = dev_priv->channels.ptr[i];
 
-		chan = dev_priv->fifos[i];
-		if (!chan || (dev_priv->card_type >= NV_50 &&
-			      chan == dev_priv->fifos[0]))
-			continue;
-
-		ret = nouveau_fence_new(chan, &fence, true);
-		if (ret == 0) {
-			ret = nouveau_fence_wait(fence, NULL, false, false);
-			nouveau_fence_unref((void *)&fence);
-		}
-
-		if (ret) {
-			NV_ERROR(dev, "Failed to idle channel %d for suspend\n",
-				 chan->id);
-		}
+		if (chan && chan->pushbuf_bo)
+			nouveau_channel_idle(chan);
 	}
 
 	pgraph->fifo_access(dev, false);
@@ -219,17 +210,17 @@
 	pfifo->unload_context(dev);
 	pgraph->unload_context(dev);
 
-	NV_INFO(dev, "Suspending GPU objects...\n");
-	ret = nouveau_gpuobj_suspend(dev);
+	ret = pinstmem->suspend(dev);
 	if (ret) {
 		NV_ERROR(dev, "... failed: %d\n", ret);
 		goto out_abort;
 	}
 
-	ret = pinstmem->suspend(dev);
+	NV_INFO(dev, "Suspending GPU objects...\n");
+	ret = nouveau_gpuobj_suspend(dev);
 	if (ret) {
 		NV_ERROR(dev, "... failed: %d\n", ret);
-		nouveau_gpuobj_suspend_cleanup(dev);
+		pinstmem->resume(dev);
 		goto out_abort;
 	}
 
@@ -294,17 +285,18 @@
 		}
 	}
 
+	NV_INFO(dev, "Restoring GPU objects...\n");
+	nouveau_gpuobj_resume(dev);
+
 	NV_INFO(dev, "Reinitialising engines...\n");
 	engine->instmem.resume(dev);
 	engine->mc.init(dev);
 	engine->timer.init(dev);
 	engine->fb.init(dev);
 	engine->graph.init(dev);
+	engine->crypt.init(dev);
 	engine->fifo.init(dev);
 
-	NV_INFO(dev, "Restoring GPU objects...\n");
-	nouveau_gpuobj_resume(dev);
-
 	nouveau_irq_postinstall(dev);
 
 	/* Re-write SKIPS, they'll have been lost over the suspend */
@@ -313,7 +305,7 @@
 		int j;
 
 		for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
-			chan = dev_priv->fifos[i];
+			chan = dev_priv->channels.ptr[i];
 			if (!chan || !chan->pushbuf_bo)
 				continue;
 
@@ -347,13 +339,11 @@
 
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 		struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+		u32 offset = nv_crtc->cursor.nvbo->bo.mem.start << PAGE_SHIFT;
 
-		nv_crtc->cursor.set_offset(nv_crtc,
-					nv_crtc->cursor.nvbo->bo.offset -
-					dev_priv->vm_vram_base);
-
+		nv_crtc->cursor.set_offset(nv_crtc, offset);
 		nv_crtc->cursor.set_pos(nv_crtc, nv_crtc->cursor_saved_x,
-			nv_crtc->cursor_saved_y);
+						 nv_crtc->cursor_saved_y);
 	}
 
 	/* Force CLUT to get re-loaded during modeset */
@@ -393,6 +383,9 @@
 	.irq_postinstall = nouveau_irq_postinstall,
 	.irq_uninstall = nouveau_irq_uninstall,
 	.irq_handler = nouveau_irq_handler,
+	.get_vblank_counter = drm_vblank_count,
+	.enable_vblank = nouveau_vblank_enable,
+	.disable_vblank = nouveau_vblank_disable,
 	.reclaim_buffers = drm_core_reclaim_buffers,
 	.ioctls = nouveau_ioctls,
 	.fops = {
@@ -403,6 +396,7 @@
 		.mmap = nouveau_ttm_mmap,
 		.poll = drm_poll,
 		.fasync = drm_fasync,
+		.read = drm_read,
 #if defined(CONFIG_COMPAT)
 		.compat_ioctl = nouveau_compat_ioctl,
 #endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 1c7db64..8f13906 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -54,22 +54,36 @@
 #include "nouveau_drm.h"
 #include "nouveau_reg.h"
 #include "nouveau_bios.h"
+#include "nouveau_util.h"
+
 struct nouveau_grctx;
+struct nouveau_vram;
+#include "nouveau_vm.h"
 
 #define MAX_NUM_DCB_ENTRIES 16
 
 #define NOUVEAU_MAX_CHANNEL_NR 128
 #define NOUVEAU_MAX_TILE_NR 15
 
-#define NV50_VM_MAX_VRAM (2*1024*1024*1024ULL)
-#define NV50_VM_BLOCK    (512*1024*1024ULL)
-#define NV50_VM_VRAM_NR  (NV50_VM_MAX_VRAM / NV50_VM_BLOCK)
+struct nouveau_vram {
+	struct drm_device *dev;
+
+	struct nouveau_vma bar_vma;
+
+	struct list_head regions;
+	u32 memtype;
+	u64 offset;
+	u64 size;
+};
 
 struct nouveau_tile_reg {
-	struct nouveau_fence *fence;
-	uint32_t addr;
-	uint32_t size;
 	bool used;
+	uint32_t addr;
+	uint32_t limit;
+	uint32_t pitch;
+	uint32_t zcomp;
+	struct drm_mm_node *tag_mem;
+	struct nouveau_fence *fence;
 };
 
 struct nouveau_bo {
@@ -88,6 +102,7 @@
 
 	struct nouveau_channel *channel;
 
+	struct nouveau_vma vma;
 	bool mappable;
 	bool no_vm;
 
@@ -96,7 +111,6 @@
 	struct nouveau_tile_reg *tile;
 
 	struct drm_gem_object *gem;
-	struct drm_file *cpu_filp;
 	int pin_refcnt;
 };
 
@@ -133,20 +147,28 @@
 
 #define NVOBJ_ENGINE_SW		0
 #define NVOBJ_ENGINE_GR		1
-#define NVOBJ_ENGINE_DISPLAY	2
+#define NVOBJ_ENGINE_PPP	2
+#define NVOBJ_ENGINE_COPY	3
+#define NVOBJ_ENGINE_VP		4
+#define NVOBJ_ENGINE_CRYPT      5
+#define NVOBJ_ENGINE_BSP	6
+#define NVOBJ_ENGINE_DISPLAY	0xcafe0001
 #define NVOBJ_ENGINE_INT	0xdeadbeef
 
+#define NVOBJ_FLAG_DONT_MAP             (1 << 0)
 #define NVOBJ_FLAG_ZERO_ALLOC		(1 << 1)
 #define NVOBJ_FLAG_ZERO_FREE		(1 << 2)
+#define NVOBJ_FLAG_VM			(1 << 3)
+
+#define NVOBJ_CINST_GLOBAL	0xdeadbeef
+
 struct nouveau_gpuobj {
 	struct drm_device *dev;
 	struct kref refcount;
 	struct list_head list;
 
-	struct drm_mm_node *im_pramin;
-	struct nouveau_bo *im_backing;
-	uint32_t *im_backing_suspend;
-	int im_bound;
+	void *node;
+	u32 *suspend;
 
 	uint32_t flags;
 
@@ -162,10 +184,29 @@
 	void *priv;
 };
 
+struct nouveau_page_flip_state {
+	struct list_head head;
+	struct drm_pending_vblank_event *event;
+	int crtc, bpp, pitch, x, y;
+	uint64_t offset;
+};
+
+enum nouveau_channel_mutex_class {
+	NOUVEAU_UCHANNEL_MUTEX,
+	NOUVEAU_KCHANNEL_MUTEX
+};
+
 struct nouveau_channel {
 	struct drm_device *dev;
 	int id;
 
+	/* references to the channel data structure */
+	struct kref ref;
+	/* users of the hardware channel resources; the hardware
+	 * context will be kicked off when this count reaches zero. */
+	atomic_t users;
+	struct mutex mutex;
+
 	/* owner of this fifo */
 	struct drm_file *file_priv;
 	/* mapping of the fifo itself */
@@ -202,12 +243,12 @@
 	/* PGRAPH context */
 	/* XXX may be merge 2 pointers as private data ??? */
 	struct nouveau_gpuobj *ramin_grctx;
+	struct nouveau_gpuobj *crypt_ctx;
 	void *pgraph_ctx;
 
 	/* NV50 VM */
+	struct nouveau_vm     *vm;
 	struct nouveau_gpuobj *vm_pd;
-	struct nouveau_gpuobj *vm_gart_pt;
-	struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR];
 
 	/* Objects */
 	struct nouveau_gpuobj *ramin; /* Private instmem */
@@ -238,9 +279,11 @@
 
 	struct {
 		struct nouveau_gpuobj *vblsem;
+		uint32_t vblsem_head;
 		uint32_t vblsem_offset;
 		uint32_t vblsem_rval;
 		struct list_head vbl_wait;
+		struct list_head flip;
 	} nvsw;
 
 	struct {
@@ -258,11 +301,11 @@
 	int	(*suspend)(struct drm_device *dev);
 	void	(*resume)(struct drm_device *dev);
 
-	int	(*populate)(struct drm_device *, struct nouveau_gpuobj *,
-			    uint32_t *size);
-	void	(*clear)(struct drm_device *, struct nouveau_gpuobj *);
-	int	(*bind)(struct drm_device *, struct nouveau_gpuobj *);
-	int	(*unbind)(struct drm_device *, struct nouveau_gpuobj *);
+	int	(*get)(struct nouveau_gpuobj *, u32 size, u32 align);
+	void	(*put)(struct nouveau_gpuobj *);
+	int	(*map)(struct nouveau_gpuobj *);
+	void	(*unmap)(struct nouveau_gpuobj *);
+
 	void	(*flush)(struct drm_device *);
 };
 
@@ -279,12 +322,17 @@
 
 struct nouveau_fb_engine {
 	int num_tiles;
+	struct drm_mm tag_heap;
+	void *priv;
 
 	int  (*init)(struct drm_device *dev);
 	void (*takedown)(struct drm_device *dev);
 
-	void (*set_region_tiling)(struct drm_device *dev, int i, uint32_t addr,
-				 uint32_t size, uint32_t pitch);
+	void (*init_tile_region)(struct drm_device *dev, int i,
+				 uint32_t addr, uint32_t size,
+				 uint32_t pitch, uint32_t flags);
+	void (*set_tile_region)(struct drm_device *dev, int i);
+	void (*free_tile_region)(struct drm_device *dev, int i);
 };
 
 struct nouveau_fifo_engine {
@@ -310,21 +358,9 @@
 	void (*tlb_flush)(struct drm_device *dev);
 };
 
-struct nouveau_pgraph_object_method {
-	int id;
-	int (*exec)(struct nouveau_channel *chan, int grclass, int mthd,
-		      uint32_t data);
-};
-
-struct nouveau_pgraph_object_class {
-	int id;
-	bool software;
-	struct nouveau_pgraph_object_method *methods;
-};
-
 struct nouveau_pgraph_engine {
-	struct nouveau_pgraph_object_class *grclass;
 	bool accel_blocked;
+	bool registered;
 	int grctx_size;
 
 	/* NV2x/NV3x context table (0x400780) */
@@ -342,8 +378,7 @@
 	int  (*unload_context)(struct drm_device *);
 	void (*tlb_flush)(struct drm_device *dev);
 
-	void (*set_region_tiling)(struct drm_device *dev, int i, uint32_t addr,
-				  uint32_t size, uint32_t pitch);
+	void (*set_tile_region)(struct drm_device *dev, int i);
 };
 
 struct nouveau_display_engine {
@@ -355,13 +390,19 @@
 };
 
 struct nouveau_gpio_engine {
+	void *priv;
+
 	int  (*init)(struct drm_device *);
 	void (*takedown)(struct drm_device *);
 
 	int  (*get)(struct drm_device *, enum dcb_gpio_tag);
 	int  (*set)(struct drm_device *, enum dcb_gpio_tag, int state);
 
-	void (*irq_enable)(struct drm_device *, enum dcb_gpio_tag, bool on);
+	int  (*irq_register)(struct drm_device *, enum dcb_gpio_tag,
+			     void (*)(void *, int), void *);
+	void (*irq_unregister)(struct drm_device *, enum dcb_gpio_tag,
+			       void (*)(void *, int), void *);
+	bool (*irq_enable)(struct drm_device *, enum dcb_gpio_tag, bool on);
 };
 
 struct nouveau_pm_voltage_level {
@@ -437,6 +478,7 @@
 	struct nouveau_pm_level *cur;
 
 	struct device *hwmon;
+	struct notifier_block acpi_nb;
 
 	int (*clock_get)(struct drm_device *, u32 id);
 	void *(*clock_pre)(struct drm_device *, struct nouveau_pm_level *,
@@ -449,6 +491,25 @@
 	int (*temp_get)(struct drm_device *);
 };
 
+struct nouveau_crypt_engine {
+	bool registered;
+
+	int  (*init)(struct drm_device *);
+	void (*takedown)(struct drm_device *);
+	int  (*create_context)(struct nouveau_channel *);
+	void (*destroy_context)(struct nouveau_channel *);
+	void (*tlb_flush)(struct drm_device *dev);
+};
+
+struct nouveau_vram_engine {
+	int  (*init)(struct drm_device *);
+	int  (*get)(struct drm_device *, u64, u32 align, u32 size_nc,
+		    u32 type, struct nouveau_vram **);
+	void (*put)(struct drm_device *, struct nouveau_vram **);
+
+	bool (*flags_valid)(struct drm_device *, u32 tile_flags);
+};
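
A sketch of allocating VRAM through the new engine, assuming the unnamed u64 parameter is the allocation size (consistent with the nv50 VRAM allocator this series introduces) and that type 0 stands for an untiled allocation:

	struct nouveau_vram *vram = NULL;
	int ret;

	ret = dev_priv->engine.vram.get(dev, 64 * 1024, 4096, 0, 0, &vram);
	if (ret)
		return ret;
	/* ... use the allocation ... */
	dev_priv->engine.vram.put(dev, &vram);
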
+
 struct nouveau_engine {
 	struct nouveau_instmem_engine instmem;
 	struct nouveau_mc_engine      mc;
@@ -459,6 +520,8 @@
 	struct nouveau_display_engine display;
 	struct nouveau_gpio_engine    gpio;
 	struct nouveau_pm_engine      pm;
+	struct nouveau_crypt_engine   crypt;
+	struct nouveau_vram_engine    vram;
 };
 
 struct nouveau_pll_vals {
@@ -577,18 +640,15 @@
 	bool ramin_available;
 	struct drm_mm ramin_heap;
 	struct list_head gpuobj_list;
+	struct list_head classes;
 
 	struct nouveau_bo *vga_ram;
 
+	/* interrupt handling */
+	void (*irq_handler[32])(struct drm_device *);
+	bool msi_enabled;
 	struct workqueue_struct *wq;
 	struct work_struct irq_work;
-	struct work_struct hpd_work;
-
-	struct {
-		spinlock_t lock;
-		uint32_t hpd0_bits;
-		uint32_t hpd1_bits;
-	} hpd_state;
 
 	struct list_head vbl_waiting;
 
@@ -605,8 +665,10 @@
 		struct nouveau_bo *bo;
 	} fence;
 
-	int fifo_alloc_count;
-	struct nouveau_channel *fifos[NOUVEAU_MAX_CHANNEL_NR];
+	struct {
+		spinlock_t lock;
+		struct nouveau_channel *ptr[NOUVEAU_MAX_CHANNEL_NR];
+	} channels;
 
 	struct nouveau_engine engine;
 	struct nouveau_channel *channel;
@@ -632,12 +694,14 @@
 		uint64_t aper_free;
 
 		struct nouveau_gpuobj *sg_ctxdma;
-		struct page *sg_dummy_page;
-		dma_addr_t sg_dummy_bus;
+		struct nouveau_vma vma;
 	} gart_info;
 
 	/* nv10-nv40 tiling regions */
-	struct nouveau_tile_reg tile[NOUVEAU_MAX_TILE_NR];
+	struct {
+		struct nouveau_tile_reg reg[NOUVEAU_MAX_TILE_NR];
+		spinlock_t lock;
+	} tile;
 
 	/* VRAM/fb configuration */
 	uint64_t vram_size;
@@ -650,14 +714,12 @@
 	uint64_t fb_aper_free;
 	int fb_mtrr;
 
+	/* BAR control (NV50-) */
+	struct nouveau_vm *bar1_vm;
+	struct nouveau_vm *bar3_vm;
+
 	/* G8x/G9x virtual address space */
-	uint64_t vm_gart_base;
-	uint64_t vm_gart_size;
-	uint64_t vm_vram_base;
-	uint64_t vm_vram_size;
-	uint64_t vm_end;
-	struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR];
-	int vm_vram_pt_nr;
+	struct nouveau_vm *chan_vm;
 
 	struct nvbios vbios;
 
@@ -674,6 +736,7 @@
 	struct backlight_device *backlight;
 
 	struct nouveau_channel *evo;
+	u32 evo_alloc;
 	struct {
 		struct dcb_entry *dcb;
 		u16 script;
@@ -719,16 +782,6 @@
 	return 0;
 }
 
-#define NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(id, cl, ch) do {    \
-	struct drm_nouveau_private *nv = dev->dev_private;       \
-	if (!nouveau_channel_owner(dev, (cl), (id))) {           \
-		NV_ERROR(dev, "pid %d doesn't own channel %d\n", \
-			 DRM_CURRENTPID, (id));                  \
-		return -EPERM;                                   \
-	}                                                        \
-	(ch) = nv->fifos[(id)];                                  \
-} while (0)
-
 /* nouveau_drv.c */
 extern int nouveau_agpmode;
 extern int nouveau_duallink;
@@ -748,6 +801,7 @@
 extern int nouveau_override_conntype;
 extern char *nouveau_perflvl;
 extern int nouveau_perflvl_wr;
+extern int nouveau_msi;
 
 extern int nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state);
 extern int nouveau_pci_resume(struct pci_dev *pdev);
@@ -762,8 +816,10 @@
 				   struct drm_file *);
 extern int  nouveau_ioctl_setparam(struct drm_device *, void *data,
 				   struct drm_file *);
-extern bool nouveau_wait_until(struct drm_device *, uint64_t timeout,
-			       uint32_t reg, uint32_t mask, uint32_t val);
+extern bool nouveau_wait_eq(struct drm_device *, uint64_t timeout,
+			    uint32_t reg, uint32_t mask, uint32_t val);
+extern bool nouveau_wait_ne(struct drm_device *, uint64_t timeout,
+			    uint32_t reg, uint32_t mask, uint32_t val);
 extern bool nouveau_wait_for_idle(struct drm_device *);
 extern int  nouveau_card_init(struct drm_device *);
 
@@ -775,18 +831,15 @@
 extern int  nouveau_mem_init_agp(struct drm_device *);
 extern int  nouveau_mem_reset_agp(struct drm_device *);
 extern void nouveau_mem_close(struct drm_device *);
-extern struct nouveau_tile_reg *nv10_mem_set_tiling(struct drm_device *dev,
-						    uint32_t addr,
-						    uint32_t size,
-						    uint32_t pitch);
-extern void nv10_mem_expire_tiling(struct drm_device *dev,
-				   struct nouveau_tile_reg *tile,
-				   struct nouveau_fence *fence);
-extern int  nv50_mem_vm_bind_linear(struct drm_device *, uint64_t virt,
-				    uint32_t size, uint32_t flags,
-				    uint64_t phys);
-extern void nv50_mem_vm_unbind(struct drm_device *, uint64_t virt,
-			       uint32_t size);
+extern int  nouveau_mem_detect(struct drm_device *);
+extern bool nouveau_mem_flags_valid(struct drm_device *, u32 tile_flags);
+extern struct nouveau_tile_reg *nv10_mem_set_tiling(
+	struct drm_device *dev, uint32_t addr, uint32_t size,
+	uint32_t pitch, uint32_t flags);
+extern void nv10_mem_put_tile_region(struct drm_device *dev,
+				     struct nouveau_tile_reg *tile,
+				     struct nouveau_fence *fence);
+extern const struct ttm_mem_type_manager_func nouveau_vram_manager;
 
 /* nouveau_notifier.c */
 extern int  nouveau_notifier_init_channel(struct nouveau_channel *);
@@ -803,21 +856,44 @@
 extern struct drm_ioctl_desc nouveau_ioctls[];
 extern int nouveau_max_ioctl;
 extern void nouveau_channel_cleanup(struct drm_device *, struct drm_file *);
-extern int  nouveau_channel_owner(struct drm_device *, struct drm_file *,
-				  int channel);
 extern int  nouveau_channel_alloc(struct drm_device *dev,
 				  struct nouveau_channel **chan,
 				  struct drm_file *file_priv,
 				  uint32_t fb_ctxdma, uint32_t tt_ctxdma);
-extern void nouveau_channel_free(struct nouveau_channel *);
+extern struct nouveau_channel *
+nouveau_channel_get_unlocked(struct nouveau_channel *);
+extern struct nouveau_channel *
+nouveau_channel_get(struct drm_device *, struct drm_file *, int id);
+extern void nouveau_channel_put_unlocked(struct nouveau_channel **);
+extern void nouveau_channel_put(struct nouveau_channel **);
+extern void nouveau_channel_ref(struct nouveau_channel *chan,
+				struct nouveau_channel **pchan);
+extern void nouveau_channel_idle(struct nouveau_channel *chan);
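
These replace the old owner/free interface with refcounted lookups; the pattern the reworked ioctls in nouveau_gem.c below follow is:

	chan = nouveau_channel_get(dev, file_priv, req->channel);
	if (IS_ERR(chan))
		return PTR_ERR(chan);
	/* ... submit work on the channel ... */
	nouveau_channel_put(&chan);	/* drops the ref, clears the pointer */
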
 
 /* nouveau_object.c */
+#define NVOBJ_CLASS(d,c,e) do {                                                \
+	int ret = nouveau_gpuobj_class_new((d), (c), NVOBJ_ENGINE_##e);        \
+	if (ret)                                                               \
+		return ret;                                                    \
+} while (0)
+
+#define NVOBJ_MTHD(d,c,m,e) do {                                               \
+	int ret = nouveau_gpuobj_mthd_new((d), (c), (m), (e));                 \
+	if (ret)                                                               \
+		return ret;                                                    \
+} while (0)
+
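
Both macros return from the enclosing function on failure, so they are meant for int-returning registration code. As a hedged example of how the series wires up the software class and its page-flip method (class and method numbers taken from the NV_SW defines at the end of this header):

	NVOBJ_CLASS(dev, 0x506e, SW);	/* NV_SW, routed to the SW engine */
	NVOBJ_MTHD(dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
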
 extern int  nouveau_gpuobj_early_init(struct drm_device *);
 extern int  nouveau_gpuobj_init(struct drm_device *);
 extern void nouveau_gpuobj_takedown(struct drm_device *);
 extern int  nouveau_gpuobj_suspend(struct drm_device *dev);
-extern void nouveau_gpuobj_suspend_cleanup(struct drm_device *dev);
 extern void nouveau_gpuobj_resume(struct drm_device *dev);
+extern int  nouveau_gpuobj_class_new(struct drm_device *, u32 class, u32 eng);
+extern int  nouveau_gpuobj_mthd_new(struct drm_device *, u32 class, u32 mthd,
+				    int (*exec)(struct nouveau_channel *,
+					        u32 class, u32 mthd, u32 data));
+extern int  nouveau_gpuobj_mthd_call(struct nouveau_channel *, u32, u32, u32);
+extern int  nouveau_gpuobj_mthd_call2(struct drm_device *, int, u32, u32, u32);
 extern int nouveau_gpuobj_channel_init(struct nouveau_channel *,
 				       uint32_t vram_h, uint32_t tt_h);
 extern void nouveau_gpuobj_channel_takedown(struct nouveau_channel *);
@@ -832,21 +908,25 @@
 extern int nouveau_gpuobj_dma_new(struct nouveau_channel *, int class,
 				  uint64_t offset, uint64_t size, int access,
 				  int target, struct nouveau_gpuobj **);
-extern int nouveau_gpuobj_gart_dma_new(struct nouveau_channel *,
-				       uint64_t offset, uint64_t size,
-				       int access, struct nouveau_gpuobj **,
-				       uint32_t *o_ret);
-extern int nouveau_gpuobj_gr_new(struct nouveau_channel *, int class,
-				 struct nouveau_gpuobj **);
-extern int nouveau_gpuobj_sw_new(struct nouveau_channel *, int class,
-				 struct nouveau_gpuobj **);
+extern int nouveau_gpuobj_gr_new(struct nouveau_channel *, u32 handle, int class);
+extern int nv50_gpuobj_dma_new(struct nouveau_channel *, int class, u64 base,
+			       u64 size, int target, int access, u32 type,
+			       u32 comp, struct nouveau_gpuobj **pobj);
+extern void nv50_gpuobj_dma_init(struct nouveau_gpuobj *, u32 offset,
+				 int class, u64 base, u64 size, int target,
+				 int access, u32 type, u32 comp);
 extern int nouveau_ioctl_grobj_alloc(struct drm_device *, void *data,
 				     struct drm_file *);
 extern int nouveau_ioctl_gpuobj_free(struct drm_device *, void *data,
 				     struct drm_file *);
 
 /* nouveau_irq.c */
+extern int         nouveau_irq_init(struct drm_device *);
+extern void        nouveau_irq_fini(struct drm_device *);
 extern irqreturn_t nouveau_irq_handler(DRM_IRQ_ARGS);
+extern void        nouveau_irq_register(struct drm_device *, int status_bit,
+					void (*)(struct drm_device *));
+extern void        nouveau_irq_unregister(struct drm_device *, int status_bit);
 extern void        nouveau_irq_preinstall(struct drm_device *);
 extern int         nouveau_irq_postinstall(struct drm_device *);
 extern void        nouveau_irq_uninstall(struct drm_device *);
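
status_bit indexes the new irq_handler[32] table in drm_nouveau_private, one handler per NV03_PMC_INTR_0 bit. Illustratively (the bit number here is an assumption, not taken from this patch):

	/* hook PFIFO's ISR at init */
	nouveau_irq_register(dev, 8, nv04_fifo_isr);
	/* and unhook it at teardown */
	nouveau_irq_unregister(dev, 8);
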
@@ -854,8 +934,8 @@
 /* nouveau_sgdma.c */
 extern int nouveau_sgdma_init(struct drm_device *);
 extern void nouveau_sgdma_takedown(struct drm_device *);
-extern int nouveau_sgdma_get_page(struct drm_device *, uint32_t offset,
-				  uint32_t *page);
+extern uint32_t nouveau_sgdma_get_physical(struct drm_device *,
+					   uint32_t offset);
 extern struct ttm_backend *nouveau_sgdma_init_ttm(struct drm_device *);
 
 /* nouveau_debugfs.c */
@@ -966,18 +1046,25 @@
 /* nv10_fb.c */
 extern int  nv10_fb_init(struct drm_device *);
 extern void nv10_fb_takedown(struct drm_device *);
-extern void nv10_fb_set_region_tiling(struct drm_device *, int, uint32_t,
-				      uint32_t, uint32_t);
+extern void nv10_fb_init_tile_region(struct drm_device *dev, int i,
+				     uint32_t addr, uint32_t size,
+				     uint32_t pitch, uint32_t flags);
+extern void nv10_fb_set_tile_region(struct drm_device *dev, int i);
+extern void nv10_fb_free_tile_region(struct drm_device *dev, int i);
 
 /* nv30_fb.c */
 extern int  nv30_fb_init(struct drm_device *);
 extern void nv30_fb_takedown(struct drm_device *);
+extern void nv30_fb_init_tile_region(struct drm_device *dev, int i,
+				     uint32_t addr, uint32_t size,
+				     uint32_t pitch, uint32_t flags);
+extern void nv30_fb_free_tile_region(struct drm_device *dev, int i);
 
 /* nv40_fb.c */
 extern int  nv40_fb_init(struct drm_device *);
 extern void nv40_fb_takedown(struct drm_device *);
-extern void nv40_fb_set_region_tiling(struct drm_device *, int, uint32_t,
-				      uint32_t, uint32_t);
+extern void nv40_fb_set_tile_region(struct drm_device *dev, int i);
+
 /* nv50_fb.c */
 extern int  nv50_fb_init(struct drm_device *);
 extern void nv50_fb_takedown(struct drm_device *);
@@ -989,6 +1076,7 @@
 
 /* nv04_fifo.c */
 extern int  nv04_fifo_init(struct drm_device *);
+extern void nv04_fifo_fini(struct drm_device *);
 extern void nv04_fifo_disable(struct drm_device *);
 extern void nv04_fifo_enable(struct drm_device *);
 extern bool nv04_fifo_reassign(struct drm_device *, bool);
@@ -998,19 +1086,18 @@
 extern void nv04_fifo_destroy_context(struct nouveau_channel *);
 extern int  nv04_fifo_load_context(struct nouveau_channel *);
 extern int  nv04_fifo_unload_context(struct drm_device *);
+extern void nv04_fifo_isr(struct drm_device *);
 
 /* nv10_fifo.c */
 extern int  nv10_fifo_init(struct drm_device *);
 extern int  nv10_fifo_channel_id(struct drm_device *);
 extern int  nv10_fifo_create_context(struct nouveau_channel *);
-extern void nv10_fifo_destroy_context(struct nouveau_channel *);
 extern int  nv10_fifo_load_context(struct nouveau_channel *);
 extern int  nv10_fifo_unload_context(struct drm_device *);
 
 /* nv40_fifo.c */
 extern int  nv40_fifo_init(struct drm_device *);
 extern int  nv40_fifo_create_context(struct nouveau_channel *);
-extern void nv40_fifo_destroy_context(struct nouveau_channel *);
 extern int  nv40_fifo_load_context(struct nouveau_channel *);
 extern int  nv40_fifo_unload_context(struct drm_device *);
 
@@ -1038,7 +1125,6 @@
 extern int  nvc0_fifo_unload_context(struct drm_device *);
 
 /* nv04_graph.c */
-extern struct nouveau_pgraph_object_class nv04_graph_grclass[];
 extern int  nv04_graph_init(struct drm_device *);
 extern void nv04_graph_takedown(struct drm_device *);
 extern void nv04_graph_fifo_access(struct drm_device *, bool);
@@ -1047,10 +1133,11 @@
 extern void nv04_graph_destroy_context(struct nouveau_channel *);
 extern int  nv04_graph_load_context(struct nouveau_channel *);
 extern int  nv04_graph_unload_context(struct drm_device *);
-extern void nv04_graph_context_switch(struct drm_device *);
+extern int  nv04_graph_mthd_page_flip(struct nouveau_channel *chan,
+				      u32 class, u32 mthd, u32 data);
+extern struct nouveau_bitfield nv04_graph_nsource[];
 
 /* nv10_graph.c */
-extern struct nouveau_pgraph_object_class nv10_graph_grclass[];
 extern int  nv10_graph_init(struct drm_device *);
 extern void nv10_graph_takedown(struct drm_device *);
 extern struct nouveau_channel *nv10_graph_channel(struct drm_device *);
@@ -1058,13 +1145,11 @@
 extern void nv10_graph_destroy_context(struct nouveau_channel *);
 extern int  nv10_graph_load_context(struct nouveau_channel *);
 extern int  nv10_graph_unload_context(struct drm_device *);
-extern void nv10_graph_context_switch(struct drm_device *);
-extern void nv10_graph_set_region_tiling(struct drm_device *, int, uint32_t,
-					 uint32_t, uint32_t);
+extern void nv10_graph_set_tile_region(struct drm_device *dev, int i);
+extern struct nouveau_bitfield nv10_graph_intr[];
+extern struct nouveau_bitfield nv10_graph_nstatus[];
 
 /* nv20_graph.c */
-extern struct nouveau_pgraph_object_class nv20_graph_grclass[];
-extern struct nouveau_pgraph_object_class nv30_graph_grclass[];
 extern int  nv20_graph_create_context(struct nouveau_channel *);
 extern void nv20_graph_destroy_context(struct nouveau_channel *);
 extern int  nv20_graph_load_context(struct nouveau_channel *);
@@ -1072,11 +1157,9 @@
 extern int  nv20_graph_init(struct drm_device *);
 extern void nv20_graph_takedown(struct drm_device *);
 extern int  nv30_graph_init(struct drm_device *);
-extern void nv20_graph_set_region_tiling(struct drm_device *, int, uint32_t,
-					 uint32_t, uint32_t);
+extern void nv20_graph_set_tile_region(struct drm_device *dev, int i);
 
 /* nv40_graph.c */
-extern struct nouveau_pgraph_object_class nv40_graph_grclass[];
 extern int  nv40_graph_init(struct drm_device *);
 extern void nv40_graph_takedown(struct drm_device *);
 extern struct nouveau_channel *nv40_graph_channel(struct drm_device *);
@@ -1085,11 +1168,9 @@
 extern int  nv40_graph_load_context(struct nouveau_channel *);
 extern int  nv40_graph_unload_context(struct drm_device *);
 extern void nv40_grctx_init(struct nouveau_grctx *);
-extern void nv40_graph_set_region_tiling(struct drm_device *, int, uint32_t,
-					 uint32_t, uint32_t);
+extern void nv40_graph_set_tile_region(struct drm_device *dev, int i);
 
 /* nv50_graph.c */
-extern struct nouveau_pgraph_object_class nv50_graph_grclass[];
 extern int  nv50_graph_init(struct drm_device *);
 extern void nv50_graph_takedown(struct drm_device *);
 extern void nv50_graph_fifo_access(struct drm_device *, bool);
@@ -1098,7 +1179,6 @@
 extern void nv50_graph_destroy_context(struct nouveau_channel *);
 extern int  nv50_graph_load_context(struct nouveau_channel *);
 extern int  nv50_graph_unload_context(struct drm_device *);
-extern void nv50_graph_context_switch(struct drm_device *);
 extern int  nv50_grctx_init(struct nouveau_grctx *);
 extern void nv50_graph_tlb_flush(struct drm_device *dev);
 extern void nv86_graph_tlb_flush(struct drm_device *dev);
@@ -1113,16 +1193,22 @@
 extern int  nvc0_graph_load_context(struct nouveau_channel *);
 extern int  nvc0_graph_unload_context(struct drm_device *);
 
+/* nv84_crypt.c */
+extern int  nv84_crypt_init(struct drm_device *dev);
+extern void nv84_crypt_fini(struct drm_device *dev);
+extern int  nv84_crypt_create_context(struct nouveau_channel *);
+extern void nv84_crypt_destroy_context(struct nouveau_channel *);
+extern void nv84_crypt_tlb_flush(struct drm_device *dev);
+
 /* nv04_instmem.c */
 extern int  nv04_instmem_init(struct drm_device *);
 extern void nv04_instmem_takedown(struct drm_device *);
 extern int  nv04_instmem_suspend(struct drm_device *);
 extern void nv04_instmem_resume(struct drm_device *);
-extern int  nv04_instmem_populate(struct drm_device *, struct nouveau_gpuobj *,
-				  uint32_t *size);
-extern void nv04_instmem_clear(struct drm_device *, struct nouveau_gpuobj *);
-extern int  nv04_instmem_bind(struct drm_device *, struct nouveau_gpuobj *);
-extern int  nv04_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *);
+extern int  nv04_instmem_get(struct nouveau_gpuobj *, u32 size, u32 align);
+extern void nv04_instmem_put(struct nouveau_gpuobj *);
+extern int  nv04_instmem_map(struct nouveau_gpuobj *);
+extern void nv04_instmem_unmap(struct nouveau_gpuobj *);
 extern void nv04_instmem_flush(struct drm_device *);
 
 /* nv50_instmem.c */
@@ -1130,25 +1216,22 @@
 extern void nv50_instmem_takedown(struct drm_device *);
 extern int  nv50_instmem_suspend(struct drm_device *);
 extern void nv50_instmem_resume(struct drm_device *);
-extern int  nv50_instmem_populate(struct drm_device *, struct nouveau_gpuobj *,
-				  uint32_t *size);
-extern void nv50_instmem_clear(struct drm_device *, struct nouveau_gpuobj *);
-extern int  nv50_instmem_bind(struct drm_device *, struct nouveau_gpuobj *);
-extern int  nv50_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *);
+extern int  nv50_instmem_get(struct nouveau_gpuobj *, u32 size, u32 align);
+extern void nv50_instmem_put(struct nouveau_gpuobj *);
+extern int  nv50_instmem_map(struct nouveau_gpuobj *);
+extern void nv50_instmem_unmap(struct nouveau_gpuobj *);
 extern void nv50_instmem_flush(struct drm_device *);
 extern void nv84_instmem_flush(struct drm_device *);
-extern void nv50_vm_flush(struct drm_device *, int engine);
 
 /* nvc0_instmem.c */
 extern int  nvc0_instmem_init(struct drm_device *);
 extern void nvc0_instmem_takedown(struct drm_device *);
 extern int  nvc0_instmem_suspend(struct drm_device *);
 extern void nvc0_instmem_resume(struct drm_device *);
-extern int  nvc0_instmem_populate(struct drm_device *, struct nouveau_gpuobj *,
-				  uint32_t *size);
-extern void nvc0_instmem_clear(struct drm_device *, struct nouveau_gpuobj *);
-extern int  nvc0_instmem_bind(struct drm_device *, struct nouveau_gpuobj *);
-extern int  nvc0_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *);
+extern int  nvc0_instmem_get(struct nouveau_gpuobj *, u32 size, u32 align);
+extern void nvc0_instmem_put(struct nouveau_gpuobj *);
+extern int  nvc0_instmem_map(struct nouveau_gpuobj *);
+extern void nvc0_instmem_unmap(struct nouveau_gpuobj *);
 extern void nvc0_instmem_flush(struct drm_device *);
 
 /* nv04_mc.c */
@@ -1219,6 +1302,9 @@
 extern void nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val);
 extern u32 nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index);
 extern void nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val);
+extern void nouveau_bo_fence(struct nouveau_bo *, struct nouveau_fence *);
+extern int nouveau_bo_validate(struct nouveau_bo *, bool interruptible,
+			       bool no_wait_reserve, bool no_wait_gpu);
 
 /* nouveau_fence.c */
 struct nouveau_fence;
@@ -1234,12 +1320,35 @@
 			       void (*work)(void *priv, bool signalled),
 			       void *priv);
 struct nouveau_channel *nouveau_fence_channel(struct nouveau_fence *);
-extern bool nouveau_fence_signalled(void *obj, void *arg);
-extern int nouveau_fence_wait(void *obj, void *arg, bool lazy, bool intr);
+
+extern bool __nouveau_fence_signalled(void *obj, void *arg);
+extern int __nouveau_fence_wait(void *obj, void *arg, bool lazy, bool intr);
+extern int __nouveau_fence_flush(void *obj, void *arg);
+extern void __nouveau_fence_unref(void **obj);
+extern void *__nouveau_fence_ref(void *obj);
+
+static inline bool nouveau_fence_signalled(struct nouveau_fence *obj)
+{
+	return __nouveau_fence_signalled(obj, NULL);
+}
+static inline int
+nouveau_fence_wait(struct nouveau_fence *obj, bool lazy, bool intr)
+{
+	return __nouveau_fence_wait(obj, NULL, lazy, intr);
+}
 extern int nouveau_fence_sync(struct nouveau_fence *, struct nouveau_channel *);
-extern int nouveau_fence_flush(void *obj, void *arg);
-extern void nouveau_fence_unref(void **obj);
-extern void *nouveau_fence_ref(void *obj);
+static inline int nouveau_fence_flush(struct nouveau_fence *obj)
+{
+	return __nouveau_fence_flush(obj, NULL);
+}
+static inline void nouveau_fence_unref(struct nouveau_fence **obj)
+{
+	__nouveau_fence_unref((void **)obj);
+}
+static inline struct nouveau_fence *nouveau_fence_ref(struct nouveau_fence *obj)
+{
+	return __nouveau_fence_ref(obj);
+}
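
With the typed wrappers in place, a minimal emit-and-wait sequence looks like the sketch below (nouveau_fence_new() as used elsewhere in this series; error handling trimmed):

	struct nouveau_fence *fence = NULL;
	int ret;

	ret = nouveau_fence_new(chan, &fence, true);	/* create and emit */
	if (ret)
		return ret;

	ret = nouveau_fence_wait(fence, true, false);	/* lazy, uninterruptible */
	nouveau_fence_unref(&fence);			/* drop our reference */
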
 
 /* nouveau_gem.c */
 extern int nouveau_gem_new(struct drm_device *, struct nouveau_channel *,
@@ -1259,15 +1368,28 @@
 extern int nouveau_gem_ioctl_info(struct drm_device *, void *,
 				  struct drm_file *);
 
+/* nouveau_display.c */
+int nouveau_vblank_enable(struct drm_device *dev, int crtc);
+void nouveau_vblank_disable(struct drm_device *dev, int crtc);
+int nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+			   struct drm_pending_vblank_event *event);
+int nouveau_finish_page_flip(struct nouveau_channel *,
+			     struct nouveau_page_flip_state *);
+
 /* nv10_gpio.c */
 int nv10_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
 int nv10_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state);
 
 /* nv50_gpio.c */
 int nv50_gpio_init(struct drm_device *dev);
+void nv50_gpio_fini(struct drm_device *dev);
 int nv50_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
 int nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state);
-void nv50_gpio_irq_enable(struct drm_device *, enum dcb_gpio_tag, bool on);
+int  nv50_gpio_irq_register(struct drm_device *, enum dcb_gpio_tag,
+			    void (*)(void *, int), void *);
+void nv50_gpio_irq_unregister(struct drm_device *, enum dcb_gpio_tag,
+			      void (*)(void *, int), void *);
+bool nv50_gpio_irq_enable(struct drm_device *, enum dcb_gpio_tag, bool on);
 
 /* nv50_calc.c */
 int nv50_calc_pll(struct drm_device *, struct pll_lims *, int clk,
@@ -1334,7 +1456,9 @@
 }
 
 #define nv_wait(dev, reg, mask, val) \
-	nouveau_wait_until(dev, 2000000000ULL, (reg), (mask), (val))
+	nouveau_wait_eq(dev, 2000000000ULL, (reg), (mask), (val))
+#define nv_wait_ne(dev, reg, mask, val) \
+	nouveau_wait_ne(dev, 2000000000ULL, (reg), (mask), (val))
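
Both macros poll with the driver's default two-second timeout and return a bool. A usage sketch (the register offset is illustrative only):

	if (!nv_wait(dev, 0x00400700, 0xffffffff, 0x00000000))
		NV_ERROR(dev, "timed out waiting for PGRAPH idle\n");
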
 
 /* PRAMIN access */
 static inline u32 nv_ri32(struct drm_device *dev, unsigned offset)
@@ -1447,6 +1571,23 @@
 		dev->pdev->subsystem_device == sub_device;
 }
 
+/* memory type/access flags, do not match hardware values */
+#define NV_MEM_ACCESS_RO  1
+#define NV_MEM_ACCESS_WO  2
+#define NV_MEM_ACCESS_RW (NV_MEM_ACCESS_RO | NV_MEM_ACCESS_WO)
+#define NV_MEM_ACCESS_SYS 4
+#define NV_MEM_ACCESS_VM  8
+
+#define NV_MEM_TARGET_VRAM        0
+#define NV_MEM_TARGET_PCI         1
+#define NV_MEM_TARGET_PCI_NOSNOOP 2
+#define NV_MEM_TARGET_VM          3
+#define NV_MEM_TARGET_GART        4
+
+#define NV_MEM_TYPE_VM 0x7f
+#define NV_MEM_COMP_VM 0x03
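
The access and target values combine as arguments to the DMA-object constructors; the fence setup later in this patch creates a read/write, VRAM-targeted object this way (base and size are placeholders here):

	ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
				     base, size, NV_MEM_ACCESS_RW,
				     NV_MEM_TARGET_VRAM, &obj);
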
+
+/* NV_SW object class */
 #define NV_SW                                                        0x0000506e
 #define NV_SW_DMA_SEMAPHORE                                          0x00000060
 #define NV_SW_SEMAPHORE_OFFSET                                       0x00000064
@@ -1457,5 +1598,6 @@
 #define NV_SW_VBLSEM_OFFSET                                          0x00000400
 #define NV_SW_VBLSEM_RELEASE_VALUE                                   0x00000404
 #define NV_SW_VBLSEM_RELEASE                                         0x00000408
+#define NV_SW_PAGE_FLIP                                              0x00000500
 
 #endif /* __NOUVEAU_DRV_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 02a4d1f..ea861c9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -49,6 +49,96 @@
 #include "nouveau_fbcon.h"
 #include "nouveau_dma.h"
 
+static void
+nouveau_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
+{
+	struct nouveau_fbdev *nfbdev = info->par;
+	struct drm_device *dev = nfbdev->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	int ret;
+
+	if (info->state != FBINFO_STATE_RUNNING)
+		return;
+
+	ret = -ENODEV;
+	if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
+	    mutex_trylock(&dev_priv->channel->mutex)) {
+		if (dev_priv->card_type < NV_50)
+			ret = nv04_fbcon_fillrect(info, rect);
+		else
+		if (dev_priv->card_type < NV_C0)
+			ret = nv50_fbcon_fillrect(info, rect);
+		mutex_unlock(&dev_priv->channel->mutex);
+	}
+
+	if (ret == 0)
+		return;
+
+	if (ret != -ENODEV)
+		nouveau_fbcon_gpu_lockup(info);
+	cfb_fillrect(info, rect);
+}
+
+static void
+nouveau_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
+{
+	struct nouveau_fbdev *nfbdev = info->par;
+	struct drm_device *dev = nfbdev->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	int ret;
+
+	if (info->state != FBINFO_STATE_RUNNING)
+		return;
+
+	ret = -ENODEV;
+	if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
+	    mutex_trylock(&dev_priv->channel->mutex)) {
+		if (dev_priv->card_type < NV_50)
+			ret = nv04_fbcon_copyarea(info, region);
+		else
+		if (dev_priv->card_type < NV_C0)
+			ret = nv50_fbcon_copyarea(info, region);
+		mutex_unlock(&dev_priv->channel->mutex);
+	}
+
+	if (ret == 0)
+		return;
+
+	if (ret != -ENODEV)
+		nouveau_fbcon_gpu_lockup(info);
+	cfb_copyarea(info, region);
+}
+
+static void
+nouveau_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
+{
+	struct nouveau_fbdev *nfbdev = info->par;
+	struct drm_device *dev = nfbdev->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	int ret;
+
+	if (info->state != FBINFO_STATE_RUNNING)
+		return;
+
+	ret = -ENODEV;
+	if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
+	    mutex_trylock(&dev_priv->channel->mutex)) {
+		if (dev_priv->card_type < NV_50)
+			ret = nv04_fbcon_imageblit(info, image);
+		else
+		if (dev_priv->card_type < NV_C0)
+			ret = nv50_fbcon_imageblit(info, image);
+		mutex_unlock(&dev_priv->channel->mutex);
+	}
+
+	if (ret == 0)
+		return;
+
+	if (ret != -ENODEV)
+		nouveau_fbcon_gpu_lockup(info);
+	cfb_imageblit(info, image);
+}
+
 static int
 nouveau_fbcon_sync(struct fb_info *info)
 {
@@ -58,12 +148,17 @@
 	struct nouveau_channel *chan = dev_priv->channel;
 	int ret, i;
 
-	if (!chan || !chan->accel_done ||
+	if (!chan || !chan->accel_done || in_interrupt() ||
 	    info->state != FBINFO_STATE_RUNNING ||
 	    info->flags & FBINFO_HWACCEL_DISABLED)
 		return 0;
 
-	if (RING_SPACE(chan, 4)) {
+	if (!mutex_trylock(&chan->mutex))
+		return 0;
+
+	ret = RING_SPACE(chan, 4);
+	if (ret) {
+		mutex_unlock(&chan->mutex);
 		nouveau_fbcon_gpu_lockup(info);
 		return 0;
 	}
@@ -74,6 +169,7 @@
 	OUT_RING(chan, 0);
 	nouveau_bo_wr32(chan->notifier_bo, chan->m2mf_ntfy + 3, 0xffffffff);
 	FIRE_RING(chan);
+	mutex_unlock(&chan->mutex);
 
 	ret = -EBUSY;
 	for (i = 0; i < 100000; i++) {
@@ -97,40 +193,24 @@
 	.owner = THIS_MODULE,
 	.fb_check_var = drm_fb_helper_check_var,
 	.fb_set_par = drm_fb_helper_set_par,
+	.fb_fillrect = nouveau_fbcon_fillrect,
+	.fb_copyarea = nouveau_fbcon_copyarea,
+	.fb_imageblit = nouveau_fbcon_imageblit,
+	.fb_sync = nouveau_fbcon_sync,
+	.fb_pan_display = drm_fb_helper_pan_display,
+	.fb_blank = drm_fb_helper_blank,
+	.fb_setcmap = drm_fb_helper_setcmap,
+	.fb_debug_enter = drm_fb_helper_debug_enter,
+	.fb_debug_leave = drm_fb_helper_debug_leave,
+};
+
+static struct fb_ops nouveau_fbcon_sw_ops = {
+	.owner = THIS_MODULE,
+	.fb_check_var = drm_fb_helper_check_var,
+	.fb_set_par = drm_fb_helper_set_par,
 	.fb_fillrect = cfb_fillrect,
 	.fb_copyarea = cfb_copyarea,
 	.fb_imageblit = cfb_imageblit,
-	.fb_sync = nouveau_fbcon_sync,
-	.fb_pan_display = drm_fb_helper_pan_display,
-	.fb_blank = drm_fb_helper_blank,
-	.fb_setcmap = drm_fb_helper_setcmap,
-	.fb_debug_enter = drm_fb_helper_debug_enter,
-	.fb_debug_leave = drm_fb_helper_debug_leave,
-};
-
-static struct fb_ops nv04_fbcon_ops = {
-	.owner = THIS_MODULE,
-	.fb_check_var = drm_fb_helper_check_var,
-	.fb_set_par = drm_fb_helper_set_par,
-	.fb_fillrect = nv04_fbcon_fillrect,
-	.fb_copyarea = nv04_fbcon_copyarea,
-	.fb_imageblit = nv04_fbcon_imageblit,
-	.fb_sync = nouveau_fbcon_sync,
-	.fb_pan_display = drm_fb_helper_pan_display,
-	.fb_blank = drm_fb_helper_blank,
-	.fb_setcmap = drm_fb_helper_setcmap,
-	.fb_debug_enter = drm_fb_helper_debug_enter,
-	.fb_debug_leave = drm_fb_helper_debug_leave,
-};
-
-static struct fb_ops nv50_fbcon_ops = {
-	.owner = THIS_MODULE,
-	.fb_check_var = drm_fb_helper_check_var,
-	.fb_set_par = drm_fb_helper_set_par,
-	.fb_fillrect = nv50_fbcon_fillrect,
-	.fb_copyarea = nv50_fbcon_copyarea,
-	.fb_imageblit = nv50_fbcon_imageblit,
-	.fb_sync = nouveau_fbcon_sync,
 	.fb_pan_display = drm_fb_helper_pan_display,
 	.fb_blank = drm_fb_helper_blank,
 	.fb_setcmap = drm_fb_helper_setcmap,
@@ -257,9 +337,9 @@
 			      FBINFO_HWACCEL_FILLRECT |
 			      FBINFO_HWACCEL_IMAGEBLIT;
 	info->flags |= FBINFO_CAN_FORCE_OUTPUT;
-	info->fbops = &nouveau_fbcon_ops;
-	info->fix.smem_start = dev->mode_config.fb_base + nvbo->bo.offset -
-			       dev_priv->vm_vram_base;
+	info->fbops = &nouveau_fbcon_sw_ops;
+	info->fix.smem_start = dev->mode_config.fb_base +
+			       (nvbo->bo.mem.start << PAGE_SHIFT);
 	info->fix.smem_len = size;
 
 	info->screen_base = nvbo_kmap_obj_iovirtual(nouveau_fb->nvbo);
@@ -285,19 +365,18 @@
 	info->pixmap.flags = FB_PIXMAP_SYSTEM;
 	info->pixmap.scan_align = 1;
 
+	mutex_unlock(&dev->struct_mutex);
+
 	if (dev_priv->channel && !nouveau_nofbaccel) {
-		switch (dev_priv->card_type) {
-		case NV_C0:
-			break;
-		case NV_50:
-			nv50_fbcon_accel_init(info);
-			info->fbops = &nv50_fbcon_ops;
-			break;
-		default:
-			nv04_fbcon_accel_init(info);
-			info->fbops = &nv04_fbcon_ops;
-			break;
-		};
+		ret = -ENODEV;
+		if (dev_priv->card_type < NV_50)
+			ret = nv04_fbcon_accel_init(info);
+		else
+		if (dev_priv->card_type < NV_C0)
+			ret = nv50_fbcon_accel_init(info);
+
+		if (ret == 0)
+			info->fbops = &nouveau_fbcon_ops;
 	}
 
 	nouveau_fbcon_zfill(dev, nfbdev);
@@ -308,7 +387,6 @@
 						nouveau_fb->base.height,
 						nvbo->bo.offset, nvbo);
 
-	mutex_unlock(&dev->struct_mutex);
 	vga_switcheroo_client_fb_set(dev->pdev, info);
 	return 0;
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
index e7e1268..6b933f2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
@@ -40,13 +40,13 @@
 
 void nouveau_fbcon_restore(void);
 
-void nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
-void nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
-void nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
+int nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
+int nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
+int nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
 int nv04_fbcon_accel_init(struct fb_info *info);
-void nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
-void nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
-void nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
+int nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
+int nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
+int nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
 int nv50_fbcon_accel_init(struct fb_info *info);
 
 void nouveau_fbcon_gpu_lockup(struct fb_info *info);
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index ab1bbfb..374a979 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -64,6 +64,7 @@
 	struct nouveau_fence *fence =
 		container_of(ref, struct nouveau_fence, refcount);
 
+	nouveau_channel_ref(NULL, &fence->channel);
 	kfree(fence);
 }
 
@@ -76,14 +77,17 @@
 
 	spin_lock(&chan->fence.lock);
 
-	if (USE_REFCNT(dev))
-		sequence = nvchan_rd32(chan, 0x48);
-	else
-		sequence = atomic_read(&chan->fence.last_sequence_irq);
+	/* Fetch the last sequence if the channel is still up and running */
+	if (likely(!list_empty(&chan->fence.pending))) {
+		if (USE_REFCNT(dev))
+			sequence = nvchan_rd32(chan, 0x48);
+		else
+			sequence = atomic_read(&chan->fence.last_sequence_irq);
 
-	if (chan->fence.sequence_ack == sequence)
-		goto out;
-	chan->fence.sequence_ack = sequence;
+		if (chan->fence.sequence_ack == sequence)
+			goto out;
+		chan->fence.sequence_ack = sequence;
+	}
 
 	list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) {
 		sequence = fence->sequence;
@@ -113,13 +117,13 @@
 	if (!fence)
 		return -ENOMEM;
 	kref_init(&fence->refcount);
-	fence->channel = chan;
+	nouveau_channel_ref(chan, &fence->channel);
 
 	if (emit)
 		ret = nouveau_fence_emit(fence);
 
 	if (ret)
-		nouveau_fence_unref((void *)&fence);
+		nouveau_fence_unref(&fence);
 	*pfence = fence;
 	return ret;
 }
@@ -127,7 +131,7 @@
 struct nouveau_channel *
 nouveau_fence_channel(struct nouveau_fence *fence)
 {
-	return fence ? fence->channel : NULL;
+	return fence ? nouveau_channel_get_unlocked(fence->channel) : NULL;
 }
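
Note that nouveau_fence_channel() now returns a referenced channel, so callers must drop it when done, as the semaphore path later in this file does:

	chan = nouveau_fence_channel(fence);
	/* ... */
	if (chan)
		nouveau_channel_put_unlocked(&chan);
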
 
 int
@@ -182,7 +186,7 @@
 }
 
 void
-nouveau_fence_unref(void **sync_obj)
+__nouveau_fence_unref(void **sync_obj)
 {
 	struct nouveau_fence *fence = nouveau_fence(*sync_obj);
 
@@ -192,7 +196,7 @@
 }
 
 void *
-nouveau_fence_ref(void *sync_obj)
+__nouveau_fence_ref(void *sync_obj)
 {
 	struct nouveau_fence *fence = nouveau_fence(sync_obj);
 
@@ -201,7 +205,7 @@
 }
 
 bool
-nouveau_fence_signalled(void *sync_obj, void *sync_arg)
+__nouveau_fence_signalled(void *sync_obj, void *sync_arg)
 {
 	struct nouveau_fence *fence = nouveau_fence(sync_obj);
 	struct nouveau_channel *chan = fence->channel;
@@ -214,13 +218,14 @@
 }
 
 int
-nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
+__nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
 {
 	unsigned long timeout = jiffies + (3 * DRM_HZ);
+	unsigned long sleep_time = jiffies + 1;
 	int ret = 0;
 
 	while (1) {
-		if (nouveau_fence_signalled(sync_obj, sync_arg))
+		if (__nouveau_fence_signalled(sync_obj, sync_arg))
 			break;
 
 		if (time_after_eq(jiffies, timeout)) {
@@ -230,7 +235,7 @@
 
 		__set_current_state(intr ? TASK_INTERRUPTIBLE
 			: TASK_UNINTERRUPTIBLE);
-		if (lazy)
+		if (lazy && time_after_eq(jiffies, sleep_time))
 			schedule_timeout(1);
 
 		if (intr && signal_pending(current)) {
@@ -368,7 +373,7 @@
 
 	kref_get(&sema->ref);
 	nouveau_fence_work(fence, semaphore_work, sema);
-	nouveau_fence_unref((void *)&fence);
+	nouveau_fence_unref(&fence);
 
 	return 0;
 }
@@ -380,33 +385,49 @@
 	struct nouveau_channel *chan = nouveau_fence_channel(fence);
 	struct drm_device *dev = wchan->dev;
 	struct nouveau_semaphore *sema;
-	int ret;
+	int ret = 0;
 
-	if (likely(!fence || chan == wchan ||
-		   nouveau_fence_signalled(fence, NULL)))
-		return 0;
+	if (likely(!chan || chan == wchan ||
+		   nouveau_fence_signalled(fence)))
+		goto out;
 
 	sema = alloc_semaphore(dev);
 	if (!sema) {
 		/* Early card or broken userspace, fall back to
 		 * software sync. */
-		return nouveau_fence_wait(fence, NULL, false, false);
+		ret = nouveau_fence_wait(fence, true, false);
+		goto out;
+	}
+
+	/* Try to take chan's mutex; if we can't take it right away,
+	 * we have to fall back to software sync to prevent
+	 * lock-ordering issues.
+	 */
+	if (!mutex_trylock(&chan->mutex)) {
+		ret = nouveau_fence_wait(fence, true, false);
+		goto out_unref;
 	}
 
 	/* Make wchan wait until it gets signalled */
 	ret = emit_semaphore(wchan, NV_SW_SEMAPHORE_ACQUIRE, sema);
 	if (ret)
-		goto out;
+		goto out_unlock;
 
 	/* Signal the semaphore from chan */
 	ret = emit_semaphore(chan, NV_SW_SEMAPHORE_RELEASE, sema);
-out:
+
+out_unlock:
+	mutex_unlock(&chan->mutex);
+out_unref:
 	kref_put(&sema->ref, free_semaphore);
+out:
+	if (chan)
+		nouveau_channel_put_unlocked(&chan);
 	return ret;
 }
 
 int
-nouveau_fence_flush(void *sync_obj, void *sync_arg)
+__nouveau_fence_flush(void *sync_obj, void *sync_arg)
 {
 	return 0;
 }
@@ -420,12 +441,7 @@
 	int ret;
 
 	/* Create an NV_SW object for various sync purposes */
-	ret = nouveau_gpuobj_sw_new(chan, NV_SW, &obj);
-	if (ret)
-		return ret;
-
-	ret = nouveau_ramht_insert(chan, NvSw, obj);
-	nouveau_gpuobj_ref(NULL, &obj);
+	ret = nouveau_gpuobj_gr_new(chan, NvSw, NV_SW);
 	if (ret)
 		return ret;
 
@@ -437,13 +453,12 @@
 
 	/* Create a DMA object for the shared cross-channel sync area. */
 	if (USE_SEMA(dev)) {
-		struct drm_mm_node *mem = dev_priv->fence.bo->bo.mem.mm_node;
+		struct ttm_mem_reg *mem = &dev_priv->fence.bo->bo.mem;
 
 		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
 					     mem->start << PAGE_SHIFT,
-					     mem->size << PAGE_SHIFT,
-					     NV_DMA_ACCESS_RW,
-					     NV_DMA_TARGET_VIDMEM, &obj);
+					     mem->size, NV_MEM_ACCESS_RW,
+					     NV_MEM_TARGET_VRAM, &obj);
 		if (ret)
 			return ret;
 
@@ -473,6 +488,8 @@
 {
 	struct nouveau_fence *tmp, *fence;
 
+	spin_lock(&chan->fence.lock);
+
 	list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) {
 		fence->signalled = true;
 		list_del(&fence->entry);
@@ -482,6 +499,8 @@
 
 		kref_put(&fence->refcount, nouveau_fence_del);
 	}
+
+	spin_unlock(&chan->fence.lock);
 }
 
 int
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 1f2301d..506c508 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -48,9 +48,6 @@
 		return;
 	nvbo->gem = NULL;
 
-	if (unlikely(nvbo->cpu_filp))
-		ttm_bo_synccpu_write_release(bo);
-
 	if (unlikely(nvbo->pin_refcnt)) {
 		nvbo->pin_refcnt = 1;
 		nouveau_bo_unpin(nvbo);
@@ -106,32 +103,6 @@
 	return 0;
 }
 
-static bool
-nouveau_gem_tile_flags_valid(struct drm_device *dev, uint32_t tile_flags)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-
-	if (dev_priv->card_type >= NV_50) {
-		switch (tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK) {
-		case 0x0000:
-		case 0x1800:
-		case 0x2800:
-		case 0x4800:
-		case 0x7000:
-		case 0x7400:
-		case 0x7a00:
-		case 0xe000:
-			return true;
-		}
-	} else {
-		if (!(tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK))
-			return true;
-	}
-
-	NV_ERROR(dev, "bad page flags: 0x%08x\n", tile_flags);
-	return false;
-}
-
 int
 nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
 		      struct drm_file *file_priv)
@@ -146,11 +117,6 @@
 	if (unlikely(dev_priv->ttm.bdev.dev_mapping == NULL))
 		dev_priv->ttm.bdev.dev_mapping = dev_priv->dev->dev_mapping;
 
-	if (req->channel_hint) {
-		NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel_hint,
-						     file_priv, chan);
-	}
-
 	if (req->info.domain & NOUVEAU_GEM_DOMAIN_VRAM)
 		flags |= TTM_PL_FLAG_VRAM;
 	if (req->info.domain & NOUVEAU_GEM_DOMAIN_GART)
@@ -158,13 +124,23 @@
 	if (!flags || req->info.domain & NOUVEAU_GEM_DOMAIN_CPU)
 		flags |= TTM_PL_FLAG_SYSTEM;
 
-	if (!nouveau_gem_tile_flags_valid(dev, req->info.tile_flags))
+	if (!dev_priv->engine.vram.flags_valid(dev, req->info.tile_flags)) {
+		NV_ERROR(dev, "bad page flags: 0x%08x\n", req->info.tile_flags);
 		return -EINVAL;
+	}
+
+	if (req->channel_hint) {
+		chan = nouveau_channel_get(dev, file_priv, req->channel_hint);
+		if (IS_ERR(chan))
+			return PTR_ERR(chan);
+	}
 
 	ret = nouveau_gem_new(dev, chan, req->info.size, req->align, flags,
 			      req->info.tile_mode, req->info.tile_flags, false,
 			      (req->info.domain & NOUVEAU_GEM_DOMAIN_MAPPABLE),
 			      &nvbo);
+	if (chan)
+		nouveau_channel_put(&chan);
 	if (ret)
 		return ret;
 
@@ -231,15 +207,8 @@
 
 	list_for_each_safe(entry, tmp, list) {
 		nvbo = list_entry(entry, struct nouveau_bo, entry);
-		if (likely(fence)) {
-			struct nouveau_fence *prev_fence;
 
-			spin_lock(&nvbo->bo.bdev->fence_lock);
-			prev_fence = nvbo->bo.sync_obj;
-			nvbo->bo.sync_obj = nouveau_fence_ref(fence);
-			spin_unlock(&nvbo->bo.bdev->fence_lock);
-			nouveau_fence_unref((void *)&prev_fence);
-		}
+		nouveau_bo_fence(nvbo, fence);
 
 		if (unlikely(nvbo->validate_mapped)) {
 			ttm_bo_kunmap(&nvbo->kmap);
@@ -299,14 +268,15 @@
 			return -EINVAL;
 		}
 
-		ret = ttm_bo_reserve(&nvbo->bo, false, false, true, sequence);
+		ret = ttm_bo_reserve(&nvbo->bo, true, false, true, sequence);
 		if (ret) {
 			validate_fini(op, NULL);
-			if (ret == -EAGAIN)
-				ret = ttm_bo_wait_unreserved(&nvbo->bo, false);
+			if (unlikely(ret == -EAGAIN))
+				ret = ttm_bo_wait_unreserved(&nvbo->bo, true);
 			drm_gem_object_unreference_unlocked(gem);
-			if (ret) {
-				NV_ERROR(dev, "fail reserve\n");
+			if (unlikely(ret)) {
+				if (ret != -ERESTARTSYS)
+					NV_ERROR(dev, "fail reserve\n");
 				return ret;
 			}
 			goto retry;
@@ -331,25 +301,6 @@
 			validate_fini(op, NULL);
 			return -EINVAL;
 		}
-
-		if (unlikely(atomic_read(&nvbo->bo.cpu_writers) > 0)) {
-			validate_fini(op, NULL);
-
-			if (nvbo->cpu_filp == file_priv) {
-				NV_ERROR(dev, "bo %p mapped by process trying "
-					      "to validate it!\n", nvbo);
-				return -EINVAL;
-			}
-
-			mutex_unlock(&drm_global_mutex);
-			ret = ttm_bo_wait_cpu(&nvbo->bo, false);
-			mutex_lock(&drm_global_mutex);
-			if (ret) {
-				NV_ERROR(dev, "fail wait_cpu\n");
-				return ret;
-			}
-			goto retry;
-		}
 	}
 
 	return 0;
@@ -383,11 +334,11 @@
 		}
 
 		nvbo->channel = (b->read_domains & (1 << 31)) ? NULL : chan;
-		ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
-				      false, false, false);
+		ret = nouveau_bo_validate(nvbo, true, false, false);
 		nvbo->channel = NULL;
 		if (unlikely(ret)) {
-			NV_ERROR(dev, "fail ttm_validate\n");
+			if (ret != -ERESTARTSYS)
+				NV_ERROR(dev, "fail ttm_validate\n");
 			return ret;
 		}
 
@@ -439,13 +390,15 @@
 
 	ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
 	if (unlikely(ret)) {
-		NV_ERROR(dev, "validate_init\n");
+		if (ret != -ERESTARTSYS)
+			NV_ERROR(dev, "validate_init\n");
 		return ret;
 	}
 
 	ret = validate_list(chan, &op->vram_list, pbbo, user_buffers);
 	if (unlikely(ret < 0)) {
-		NV_ERROR(dev, "validate vram_list\n");
+		if (ret != -ERESTARTSYS)
+			NV_ERROR(dev, "validate vram_list\n");
 		validate_fini(op, NULL);
 		return ret;
 	}
@@ -453,7 +406,8 @@
 
 	ret = validate_list(chan, &op->gart_list, pbbo, user_buffers);
 	if (unlikely(ret < 0)) {
-		NV_ERROR(dev, "validate gart_list\n");
+		if (ret != -ERESTARTSYS)
+			NV_ERROR(dev, "validate gart_list\n");
 		validate_fini(op, NULL);
 		return ret;
 	}
@@ -461,7 +415,8 @@
 
 	ret = validate_list(chan, &op->both_list, pbbo, user_buffers);
 	if (unlikely(ret < 0)) {
-		NV_ERROR(dev, "validate both_list\n");
+		if (ret != -ERESTARTSYS)
+			NV_ERROR(dev, "validate both_list\n");
 		validate_fini(op, NULL);
 		return ret;
 	}
@@ -585,7 +540,9 @@
 	struct nouveau_fence *fence = NULL;
 	int i, j, ret = 0, do_reloc = 0;
 
-	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel, file_priv, chan);
+	chan = nouveau_channel_get(dev, file_priv, req->channel);
+	if (IS_ERR(chan))
+		return PTR_ERR(chan);
 
 	req->vram_available = dev_priv->fb_aper_free;
 	req->gart_available = dev_priv->gart_info.aper_free;
@@ -595,28 +552,34 @@
 	if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
 		NV_ERROR(dev, "pushbuf push count exceeds limit: %d max %d\n",
 			 req->nr_push, NOUVEAU_GEM_MAX_PUSH);
+		nouveau_channel_put(&chan);
 		return -EINVAL;
 	}
 
 	if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
 		NV_ERROR(dev, "pushbuf bo count exceeds limit: %d max %d\n",
 			 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
+		nouveau_channel_put(&chan);
 		return -EINVAL;
 	}
 
 	if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
 		NV_ERROR(dev, "pushbuf reloc count exceeds limit: %d max %d\n",
 			 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
+		nouveau_channel_put(&chan);
 		return -EINVAL;
 	}
 
 	push = u_memcpya(req->push, req->nr_push, sizeof(*push));
-	if (IS_ERR(push))
+	if (IS_ERR(push)) {
+		nouveau_channel_put(&chan);
 		return PTR_ERR(push);
+	}
 
 	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
 	if (IS_ERR(bo)) {
 		kfree(push);
+		nouveau_channel_put(&chan);
 		return PTR_ERR(bo);
 	}
 
@@ -639,7 +602,8 @@
 	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
 					   req->nr_buffers, &op, &do_reloc);
 	if (ret) {
-		NV_ERROR(dev, "validate: %d\n", ret);
+		if (ret != -ERESTARTSYS)
+			NV_ERROR(dev, "validate: %d\n", ret);
 		goto out;
 	}
 
@@ -732,7 +696,7 @@
 
 out:
 	validate_fini(&op, fence);
-	nouveau_fence_unref((void**)&fence);
+	nouveau_fence_unref(&fence);
 	kfree(bo);
 	kfree(push);
 
@@ -750,6 +714,7 @@
 		req->suffix1 = 0x00000000;
 	}
 
+	nouveau_channel_put(&chan);
 	return ret;
 }
 
@@ -781,26 +746,9 @@
 		return -ENOENT;
 	nvbo = nouveau_gem_object(gem);
 
-	if (nvbo->cpu_filp) {
-		if (nvbo->cpu_filp == file_priv)
-			goto out;
-
-		ret = ttm_bo_wait_cpu(&nvbo->bo, no_wait);
-		if (ret)
-			goto out;
-	}
-
-	if (req->flags & NOUVEAU_GEM_CPU_PREP_NOBLOCK) {
-		spin_lock(&nvbo->bo.bdev->fence_lock);
-		ret = ttm_bo_wait(&nvbo->bo, false, false, no_wait);
-		spin_unlock(&nvbo->bo.bdev->fence_lock);
-	} else {
-		ret = ttm_bo_synccpu_write_grab(&nvbo->bo, no_wait);
-		if (ret == 0)
-			nvbo->cpu_filp = file_priv;
-	}
-
-out:
+	spin_lock(&nvbo->bo.bdev->fence_lock);
+	ret = ttm_bo_wait(&nvbo->bo, true, true, no_wait);
+	spin_unlock(&nvbo->bo.bdev->fence_lock);
 	drm_gem_object_unreference_unlocked(gem);
 	return ret;
 }
@@ -809,26 +757,7 @@
 nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
 			   struct drm_file *file_priv)
 {
-	struct drm_nouveau_gem_cpu_prep *req = data;
-	struct drm_gem_object *gem;
-	struct nouveau_bo *nvbo;
-	int ret = -EINVAL;
-
-	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
-	if (!gem)
-		return -ENOENT;
-	nvbo = nouveau_gem_object(gem);
-
-	if (nvbo->cpu_filp != file_priv)
-		goto out;
-	nvbo->cpu_filp = NULL;
-
-	ttm_bo_synccpu_write_release(&nvbo->bo);
-	ret = 0;
-
-out:
-	drm_gem_object_unreference_unlocked(gem);
-	return ret;
+	return 0;
 }
 
 int
diff --git a/drivers/gpu/drm/nouveau/nouveau_hw.c b/drivers/gpu/drm/nouveau/nouveau_hw.c
index b9672a0..053edf9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hw.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hw.c
@@ -953,7 +953,7 @@
 			NVWriteCRTC(dev, head, NV_PCRTC_850, regp->crtc_850);
 
 			reg900 = NVReadRAMDAC(dev, head, NV_PRAMDAC_900);
-			if (regp->crtc_cfg == NV_PCRTC_CONFIG_START_ADDRESS_HSYNC)
+			if (regp->crtc_cfg == NV10_PCRTC_CONFIG_START_ADDRESS_HSYNC)
 				NVWriteRAMDAC(dev, head, NV_PRAMDAC_900, reg900 | 0x10000);
 			else
 				NVWriteRAMDAC(dev, head, NV_PRAMDAC_900, reg900 & ~0x10000);
@@ -999,8 +999,8 @@
 		if (dev_priv->card_type == NV_10) {
 			/* Not waiting for vertical retrace before modifying
 			   CRE_53/CRE_54 causes lockups. */
-			nouveau_wait_until(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x8);
-			nouveau_wait_until(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x0);
+			nouveau_wait_eq(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x8);
+			nouveau_wait_eq(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x0);
 		}
 
 		wr_cio_state(dev, head, regp, NV_CIO_CRE_53);
@@ -1017,8 +1017,9 @@
 
 	NVWriteCRTC(dev, head, NV_PCRTC_START, regp->fb_start);
 
-	/* Setting 1 on this value gives you interrupts for every vblank period. */
-	NVWriteCRTC(dev, head, NV_PCRTC_INTR_EN_0, 0);
+	/* Enable vblank interrupts. */
+	NVWriteCRTC(dev, head, NV_PCRTC_INTR_EN_0,
+		    (dev->vblank_enabled[head] ? 1 : 0));
 	NVWriteCRTC(dev, head, NV_PCRTC_INTR_0, NV_PCRTC_INTR_0_VBLANK);
 }
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
index 7bfd9e6..2ba7265 100644
--- a/drivers/gpu/drm/nouveau/nouveau_irq.c
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
@@ -36,18 +36,7 @@
 #include "nouveau_drv.h"
 #include "nouveau_reg.h"
 #include "nouveau_ramht.h"
-#include <linux/ratelimit.h>
-
-/* needed for hotplug irq */
-#include "nouveau_connector.h"
-#include "nv50_display.h"
-
-static DEFINE_RATELIMIT_STATE(nouveau_ratelimit_state, 3 * HZ, 20);
-
-static int nouveau_ratelimit(void)
-{
-	return __ratelimit(&nouveau_ratelimit_state);
-}
+#include "nouveau_util.h"
 
 void
 nouveau_irq_preinstall(struct drm_device *dev)
@@ -57,19 +46,19 @@
 	/* Master disable */
 	nv_wr32(dev, NV03_PMC_INTR_EN_0, 0);
 
-	if (dev_priv->card_type >= NV_50) {
-		INIT_WORK(&dev_priv->irq_work, nv50_display_irq_handler_bh);
-		INIT_WORK(&dev_priv->hpd_work, nv50_display_irq_hotplug_bh);
-		spin_lock_init(&dev_priv->hpd_state.lock);
-		INIT_LIST_HEAD(&dev_priv->vbl_waiting);
-	}
+	INIT_LIST_HEAD(&dev_priv->vbl_waiting);
 }
 
 int
 nouveau_irq_postinstall(struct drm_device *dev)
 {
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
 	/* Master enable */
 	nv_wr32(dev, NV03_PMC_INTR_EN_0, NV_PMC_INTR_EN_0_MASTER_ENABLE);
+	if (dev_priv->msi_enabled)
+		nv_wr08(dev, 0x00088068, 0xff);
+
 	return 0;
 }
 
@@ -80,1178 +69,83 @@
 	nv_wr32(dev, NV03_PMC_INTR_EN_0, 0);
 }
 
-static int
-nouveau_call_method(struct nouveau_channel *chan, int class, int mthd, int data)
-{
-	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
-	struct nouveau_pgraph_object_method *grm;
-	struct nouveau_pgraph_object_class *grc;
-
-	grc = dev_priv->engine.graph.grclass;
-	while (grc->id) {
-		if (grc->id == class)
-			break;
-		grc++;
-	}
-
-	if (grc->id != class || !grc->methods)
-		return -ENOENT;
-
-	grm = grc->methods;
-	while (grm->id) {
-		if (grm->id == mthd)
-			return grm->exec(chan, class, mthd, data);
-		grm++;
-	}
-
-	return -ENOENT;
-}
-
-static bool
-nouveau_fifo_swmthd(struct nouveau_channel *chan, uint32_t addr, uint32_t data)
-{
-	struct drm_device *dev = chan->dev;
-	const int subc = (addr >> 13) & 0x7;
-	const int mthd = addr & 0x1ffc;
-
-	if (mthd == 0x0000) {
-		struct nouveau_gpuobj *gpuobj;
-
-		gpuobj = nouveau_ramht_find(chan, data);
-		if (!gpuobj)
-			return false;
-
-		if (gpuobj->engine != NVOBJ_ENGINE_SW)
-			return false;
-
-		chan->sw_subchannel[subc] = gpuobj->class;
-		nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_rd32(dev,
-			NV04_PFIFO_CACHE1_ENGINE) & ~(0xf << subc * 4));
-		return true;
-	}
-
-	/* hw object */
-	if (nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE) & (1 << (subc*4)))
-		return false;
-
-	if (nouveau_call_method(chan, chan->sw_subchannel[subc], mthd, data))
-		return false;
-
-	return true;
-}
-
-static void
-nouveau_fifo_irq_handler(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_engine *engine = &dev_priv->engine;
-	uint32_t status, reassign;
-	int cnt = 0;
-
-	reassign = nv_rd32(dev, NV03_PFIFO_CACHES) & 1;
-	while ((status = nv_rd32(dev, NV03_PFIFO_INTR_0)) && (cnt++ < 100)) {
-		struct nouveau_channel *chan = NULL;
-		uint32_t chid, get;
-
-		nv_wr32(dev, NV03_PFIFO_CACHES, 0);
-
-		chid = engine->fifo.channel_id(dev);
-		if (chid >= 0 && chid < engine->fifo.channels)
-			chan = dev_priv->fifos[chid];
-		get  = nv_rd32(dev, NV03_PFIFO_CACHE1_GET);
-
-		if (status & NV_PFIFO_INTR_CACHE_ERROR) {
-			uint32_t mthd, data;
-			int ptr;
-
-			/* NV_PFIFO_CACHE1_GET actually goes to 0xffc before
-			 * wrapping on my G80 chips, but CACHE1 isn't big
-			 * enough for this much data.. Tests show that it
-			 * wraps around to the start at GET=0x800.. No clue
-			 * as to why..
-			 */
-			ptr = (get & 0x7ff) >> 2;
-
-			if (dev_priv->card_type < NV_40) {
-				mthd = nv_rd32(dev,
-					NV04_PFIFO_CACHE1_METHOD(ptr));
-				data = nv_rd32(dev,
-					NV04_PFIFO_CACHE1_DATA(ptr));
-			} else {
-				mthd = nv_rd32(dev,
-					NV40_PFIFO_CACHE1_METHOD(ptr));
-				data = nv_rd32(dev,
-					NV40_PFIFO_CACHE1_DATA(ptr));
-			}
-
-			if (!chan || !nouveau_fifo_swmthd(chan, mthd, data)) {
-				NV_INFO(dev, "PFIFO_CACHE_ERROR - Ch %d/%d "
-					     "Mthd 0x%04x Data 0x%08x\n",
-					chid, (mthd >> 13) & 7, mthd & 0x1ffc,
-					data);
-			}
-
-			nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
-			nv_wr32(dev, NV03_PFIFO_INTR_0,
-						NV_PFIFO_INTR_CACHE_ERROR);
-
-			nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
-				nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) & ~1);
-			nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
-			nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
-				nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) | 1);
-			nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0);
-
-			nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH,
-				nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
-			nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
-
-			status &= ~NV_PFIFO_INTR_CACHE_ERROR;
-		}
-
-		if (status & NV_PFIFO_INTR_DMA_PUSHER) {
-			u32 dma_get = nv_rd32(dev, 0x003244);
-			u32 dma_put = nv_rd32(dev, 0x003240);
-			u32 push = nv_rd32(dev, 0x003220);
-			u32 state = nv_rd32(dev, 0x003228);
-
-			if (dev_priv->card_type == NV_50) {
-				u32 ho_get = nv_rd32(dev, 0x003328);
-				u32 ho_put = nv_rd32(dev, 0x003320);
-				u32 ib_get = nv_rd32(dev, 0x003334);
-				u32 ib_put = nv_rd32(dev, 0x003330);
-
-				if (nouveau_ratelimit())
-					NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%02x%08x "
-					     "Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x "
-					     "State 0x%08x Push 0x%08x\n",
-						chid, ho_get, dma_get, ho_put,
-						dma_put, ib_get, ib_put, state,
-						push);
-
-				/* METHOD_COUNT, in DMA_STATE on earlier chipsets */
-				nv_wr32(dev, 0x003364, 0x00000000);
-				if (dma_get != dma_put || ho_get != ho_put) {
-					nv_wr32(dev, 0x003244, dma_put);
-					nv_wr32(dev, 0x003328, ho_put);
-				} else
-				if (ib_get != ib_put) {
-					nv_wr32(dev, 0x003334, ib_put);
-				}
-			} else {
-				NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%08x "
-					     "Put 0x%08x State 0x%08x Push 0x%08x\n",
-					chid, dma_get, dma_put, state, push);
-
-				if (dma_get != dma_put)
-					nv_wr32(dev, 0x003244, dma_put);
-			}
-
-			nv_wr32(dev, 0x003228, 0x00000000);
-			nv_wr32(dev, 0x003220, 0x00000001);
-			nv_wr32(dev, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
-			status &= ~NV_PFIFO_INTR_DMA_PUSHER;
-		}
-
-		if (status & NV_PFIFO_INTR_SEMAPHORE) {
-			uint32_t sem;
-
-			status &= ~NV_PFIFO_INTR_SEMAPHORE;
-			nv_wr32(dev, NV03_PFIFO_INTR_0,
-				NV_PFIFO_INTR_SEMAPHORE);
-
-			sem = nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE);
-			nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);
-
-			nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
-			nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
-		}
-
-		if (dev_priv->card_type == NV_50) {
-			if (status & 0x00000010) {
-				nv50_fb_vm_trap(dev, 1, "PFIFO_BAR_FAULT");
-				status &= ~0x00000010;
-				nv_wr32(dev, 0x002100, 0x00000010);
-			}
-		}
-
-		if (status) {
-			if (nouveau_ratelimit())
-				NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n",
-					status, chid);
-			nv_wr32(dev, NV03_PFIFO_INTR_0, status);
-			status = 0;
-		}
-
-		nv_wr32(dev, NV03_PFIFO_CACHES, reassign);
-	}
-
-	if (status) {
-		NV_INFO(dev, "PFIFO still angry after %d spins, halt\n", cnt);
-		nv_wr32(dev, 0x2140, 0);
-		nv_wr32(dev, 0x140, 0);
-	}
-
-	nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PFIFO_PENDING);
-}
-
-struct nouveau_bitfield_names {
-	uint32_t mask;
-	const char *name;
-};
-
-static struct nouveau_bitfield_names nstatus_names[] =
-{
-	{ NV04_PGRAPH_NSTATUS_STATE_IN_USE,       "STATE_IN_USE" },
-	{ NV04_PGRAPH_NSTATUS_INVALID_STATE,      "INVALID_STATE" },
-	{ NV04_PGRAPH_NSTATUS_BAD_ARGUMENT,       "BAD_ARGUMENT" },
-	{ NV04_PGRAPH_NSTATUS_PROTECTION_FAULT,   "PROTECTION_FAULT" }
-};
-
-static struct nouveau_bitfield_names nstatus_names_nv10[] =
-{
-	{ NV10_PGRAPH_NSTATUS_STATE_IN_USE,       "STATE_IN_USE" },
-	{ NV10_PGRAPH_NSTATUS_INVALID_STATE,      "INVALID_STATE" },
-	{ NV10_PGRAPH_NSTATUS_BAD_ARGUMENT,       "BAD_ARGUMENT" },
-	{ NV10_PGRAPH_NSTATUS_PROTECTION_FAULT,   "PROTECTION_FAULT" }
-};
-
-static struct nouveau_bitfield_names nsource_names[] =
-{
-	{ NV03_PGRAPH_NSOURCE_NOTIFICATION,       "NOTIFICATION" },
-	{ NV03_PGRAPH_NSOURCE_DATA_ERROR,         "DATA_ERROR" },
-	{ NV03_PGRAPH_NSOURCE_PROTECTION_ERROR,   "PROTECTION_ERROR" },
-	{ NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION,    "RANGE_EXCEPTION" },
-	{ NV03_PGRAPH_NSOURCE_LIMIT_COLOR,        "LIMIT_COLOR" },
-	{ NV03_PGRAPH_NSOURCE_LIMIT_ZETA,         "LIMIT_ZETA" },
-	{ NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD,       "ILLEGAL_MTHD" },
-	{ NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION,   "DMA_R_PROTECTION" },
-	{ NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION,   "DMA_W_PROTECTION" },
-	{ NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION,   "FORMAT_EXCEPTION" },
-	{ NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION,    "PATCH_EXCEPTION" },
-	{ NV03_PGRAPH_NSOURCE_STATE_INVALID,      "STATE_INVALID" },
-	{ NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY,      "DOUBLE_NOTIFY" },
-	{ NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE,      "NOTIFY_IN_USE" },
-	{ NV03_PGRAPH_NSOURCE_METHOD_CNT,         "METHOD_CNT" },
-	{ NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION,   "BFR_NOTIFICATION" },
-	{ NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION, "DMA_VTX_PROTECTION" },
-	{ NV03_PGRAPH_NSOURCE_DMA_WIDTH_A,        "DMA_WIDTH_A" },
-	{ NV03_PGRAPH_NSOURCE_DMA_WIDTH_B,        "DMA_WIDTH_B" },
-};
-
-static void
-nouveau_print_bitfield_names_(uint32_t value,
-				const struct nouveau_bitfield_names *namelist,
-				const int namelist_len)
-{
-	/*
-	 * Caller must have already printed the KERN_* log level for us.
-	 * Also the caller is responsible for adding the newline.
-	 */
-	int i;
-	for (i = 0; i < namelist_len; ++i) {
-		uint32_t mask = namelist[i].mask;
-		if (value & mask) {
-			printk(" %s", namelist[i].name);
-			value &= ~mask;
-		}
-	}
-	if (value)
-		printk(" (unknown bits 0x%08x)", value);
-}
-#define nouveau_print_bitfield_names(val, namelist) \
-	nouveau_print_bitfield_names_((val), (namelist), ARRAY_SIZE(namelist))
-
-struct nouveau_enum_names {
-	uint32_t value;
-	const char *name;
-};
-
-static void
-nouveau_print_enum_names_(uint32_t value,
-				const struct nouveau_enum_names *namelist,
-				const int namelist_len)
-{
-	/*
-	 * Caller must have already printed the KERN_* log level for us.
-	 * Also the caller is responsible for adding the newline.
-	 */
-	int i;
-	for (i = 0; i < namelist_len; ++i) {
-		if (value == namelist[i].value) {
-			printk("%s", namelist[i].name);
-			return;
-		}
-	}
-	printk("unknown value 0x%08x", value);
-}
-#define nouveau_print_enum_names(val, namelist) \
-	nouveau_print_enum_names_((val), (namelist), ARRAY_SIZE(namelist))
-
-static int
-nouveau_graph_chid_from_grctx(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	uint32_t inst;
-	int i;
-
-	if (dev_priv->card_type < NV_40)
-		return dev_priv->engine.fifo.channels;
-	else
-	if (dev_priv->card_type < NV_50) {
-		inst = (nv_rd32(dev, 0x40032c) & 0xfffff) << 4;
-
-		for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
-			struct nouveau_channel *chan = dev_priv->fifos[i];
-
-			if (!chan || !chan->ramin_grctx)
-				continue;
-
-			if (inst == chan->ramin_grctx->pinst)
-				break;
-		}
-	} else {
-		inst = (nv_rd32(dev, 0x40032c) & 0xfffff) << 12;
-
-		for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
-			struct nouveau_channel *chan = dev_priv->fifos[i];
-
-			if (!chan || !chan->ramin)
-				continue;
-
-			if (inst == chan->ramin->vinst)
-				break;
-		}
-	}
-
-
-	return i;
-}
-
-static int
-nouveau_graph_trapped_channel(struct drm_device *dev, int *channel_ret)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_engine *engine = &dev_priv->engine;
-	int channel;
-
-	if (dev_priv->card_type < NV_10)
-		channel = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 24) & 0xf;
-	else
-	if (dev_priv->card_type < NV_40)
-		channel = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f;
-	else
-		channel = nouveau_graph_chid_from_grctx(dev);
-
-	if (channel >= engine->fifo.channels || !dev_priv->fifos[channel]) {
-		NV_ERROR(dev, "AIII, invalid/inactive channel id %d\n", channel);
-		return -EINVAL;
-	}
-
-	*channel_ret = channel;
-	return 0;
-}
-
-struct nouveau_pgraph_trap {
-	int channel;
-	int class;
-	int subc, mthd, size;
-	uint32_t data, data2;
-	uint32_t nsource, nstatus;
-};
-
-static void
-nouveau_graph_trap_info(struct drm_device *dev,
-			struct nouveau_pgraph_trap *trap)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	uint32_t address;
-
-	trap->nsource = trap->nstatus = 0;
-	if (dev_priv->card_type < NV_50) {
-		trap->nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
-		trap->nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
-	}
-
-	if (nouveau_graph_trapped_channel(dev, &trap->channel))
-		trap->channel = -1;
-	address = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
-
-	trap->mthd = address & 0x1FFC;
-	trap->data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
-	if (dev_priv->card_type < NV_10) {
-		trap->subc  = (address >> 13) & 0x7;
-	} else {
-		trap->subc  = (address >> 16) & 0x7;
-		trap->data2 = nv_rd32(dev, NV10_PGRAPH_TRAPPED_DATA_HIGH);
-	}
-
-	if (dev_priv->card_type < NV_10)
-		trap->class = nv_rd32(dev, 0x400180 + trap->subc*4) & 0xFF;
-	else if (dev_priv->card_type < NV_40)
-		trap->class = nv_rd32(dev, 0x400160 + trap->subc*4) & 0xFFF;
-	else if (dev_priv->card_type < NV_50)
-		trap->class = nv_rd32(dev, 0x400160 + trap->subc*4) & 0xFFFF;
-	else
-		trap->class = nv_rd32(dev, 0x400814);
-}
-
-static void
-nouveau_graph_dump_trap_info(struct drm_device *dev, const char *id,
-			     struct nouveau_pgraph_trap *trap)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	uint32_t nsource = trap->nsource, nstatus = trap->nstatus;
-
-	if (dev_priv->card_type < NV_50) {
-		NV_INFO(dev, "%s - nSource:", id);
-		nouveau_print_bitfield_names(nsource, nsource_names);
-		printk(", nStatus:");
-		if (dev_priv->card_type < NV_10)
-			nouveau_print_bitfield_names(nstatus, nstatus_names);
-		else
-			nouveau_print_bitfield_names(nstatus, nstatus_names_nv10);
-		printk("\n");
-	}
-
-	NV_INFO(dev, "%s - Ch %d/%d Class 0x%04x Mthd 0x%04x "
-					"Data 0x%08x:0x%08x\n",
-					id, trap->channel, trap->subc,
-					trap->class, trap->mthd,
-					trap->data2, trap->data);
-}
-
-static int
-nouveau_pgraph_intr_swmthd(struct drm_device *dev,
-			   struct nouveau_pgraph_trap *trap)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-
-	if (trap->channel < 0 ||
-	    trap->channel >= dev_priv->engine.fifo.channels ||
-	    !dev_priv->fifos[trap->channel])
-		return -ENODEV;
-
-	return nouveau_call_method(dev_priv->fifos[trap->channel],
-				   trap->class, trap->mthd, trap->data);
-}
-
-static inline void
-nouveau_pgraph_intr_notify(struct drm_device *dev, uint32_t nsource)
-{
-	struct nouveau_pgraph_trap trap;
-	int unhandled = 0;
-
-	nouveau_graph_trap_info(dev, &trap);
-
-	if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
-		if (nouveau_pgraph_intr_swmthd(dev, &trap))
-			unhandled = 1;
-	} else {
-		unhandled = 1;
-	}
-
-	if (unhandled)
-		nouveau_graph_dump_trap_info(dev, "PGRAPH_NOTIFY", &trap);
-}
-
-
-static inline void
-nouveau_pgraph_intr_error(struct drm_device *dev, uint32_t nsource)
-{
-	struct nouveau_pgraph_trap trap;
-	int unhandled = 0;
-
-	nouveau_graph_trap_info(dev, &trap);
-	trap.nsource = nsource;
-
-	if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
-		if (nouveau_pgraph_intr_swmthd(dev, &trap))
-			unhandled = 1;
-	} else if (nsource & NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION) {
-		uint32_t v = nv_rd32(dev, 0x402000);
-		nv_wr32(dev, 0x402000, v);
-
-		/* dump the error anyway for now: it's useful for
-		   Gallium development */
-		unhandled = 1;
-	} else {
-		unhandled = 1;
-	}
-
-	if (unhandled && nouveau_ratelimit())
-		nouveau_graph_dump_trap_info(dev, "PGRAPH_ERROR", &trap);
-}
-
-static inline void
-nouveau_pgraph_intr_context_switch(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_engine *engine = &dev_priv->engine;
-	uint32_t chid;
-
-	chid = engine->fifo.channel_id(dev);
-	NV_DEBUG(dev, "PGRAPH context switch interrupt channel %x\n", chid);
-
-	switch (dev_priv->card_type) {
-	case NV_04:
-		nv04_graph_context_switch(dev);
-		break;
-	case NV_10:
-		nv10_graph_context_switch(dev);
-		break;
-	default:
-		NV_ERROR(dev, "Context switch not implemented\n");
-		break;
-	}
-}
-
-static void
-nouveau_pgraph_irq_handler(struct drm_device *dev)
-{
-	uint32_t status;
-
-	while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) {
-		uint32_t nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
-
-		if (status & NV_PGRAPH_INTR_NOTIFY) {
-			nouveau_pgraph_intr_notify(dev, nsource);
-
-			status &= ~NV_PGRAPH_INTR_NOTIFY;
-			nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_NOTIFY);
-		}
-
-		if (status & NV_PGRAPH_INTR_ERROR) {
-			nouveau_pgraph_intr_error(dev, nsource);
-
-			status &= ~NV_PGRAPH_INTR_ERROR;
-			nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_ERROR);
-		}
-
-		if (status & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
-			status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
-			nv_wr32(dev, NV03_PGRAPH_INTR,
-				 NV_PGRAPH_INTR_CONTEXT_SWITCH);
-
-			nouveau_pgraph_intr_context_switch(dev);
-		}
-
-		if (status) {
-			NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n", status);
-			nv_wr32(dev, NV03_PGRAPH_INTR, status);
-		}
-
-		if ((nv_rd32(dev, NV04_PGRAPH_FIFO) & (1 << 0)) == 0)
-			nv_wr32(dev, NV04_PGRAPH_FIFO, 1);
-	}
-
-	nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
-}
-
-static struct nouveau_enum_names nv50_mp_exec_error_names[] =
-{
-	{ 3, "STACK_UNDERFLOW" },
-	{ 4, "QUADON_ACTIVE" },
-	{ 8, "TIMEOUT" },
-	{ 0x10, "INVALID_OPCODE" },
-	{ 0x40, "BREAKPOINT" },
-};
-
-static void
-nv50_pgraph_mp_trap(struct drm_device *dev, int tpid, int display)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	uint32_t units = nv_rd32(dev, 0x1540);
-	uint32_t addr, mp10, status, pc, oplow, ophigh;
-	int i;
-	int mps = 0;
-	for (i = 0; i < 4; i++) {
-		if (!(units & 1 << (i+24)))
-			continue;
-		if (dev_priv->chipset < 0xa0)
-			addr = 0x408200 + (tpid << 12) + (i << 7);
-		else
-			addr = 0x408100 + (tpid << 11) + (i << 7);
-		mp10 = nv_rd32(dev, addr + 0x10);
-		status = nv_rd32(dev, addr + 0x14);
-		if (!status)
-			continue;
-		if (display) {
-			nv_rd32(dev, addr + 0x20);
-			pc = nv_rd32(dev, addr + 0x24);
-			oplow = nv_rd32(dev, addr + 0x70);
-			ophigh = nv_rd32(dev, addr + 0x74);
-			NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - "
-					"TP %d MP %d: ", tpid, i);
-			nouveau_print_enum_names(status,
-					nv50_mp_exec_error_names);
-			printk(" at %06x warp %d, opcode %08x %08x\n",
-					pc&0xffffff, pc >> 24,
-					oplow, ophigh);
-		}
-		nv_wr32(dev, addr + 0x10, mp10);
-		nv_wr32(dev, addr + 0x14, 0);
-		mps++;
-	}
-	if (!mps && display)
-		NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - TP %d: "
-				"No MPs claiming errors?\n", tpid);
-}
-
-static void
-nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old,
-		uint32_t ustatus_new, int display, const char *name)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	int tps = 0;
-	uint32_t units = nv_rd32(dev, 0x1540);
-	int i, r;
-	uint32_t ustatus_addr, ustatus;
-	for (i = 0; i < 16; i++) {
-		if (!(units & (1 << i)))
-			continue;
-		if (dev_priv->chipset < 0xa0)
-			ustatus_addr = ustatus_old + (i << 12);
-		else
-			ustatus_addr = ustatus_new + (i << 11);
-		ustatus = nv_rd32(dev, ustatus_addr) & 0x7fffffff;
-		if (!ustatus)
-			continue;
-		tps++;
-		switch (type) {
-		case 6: /* texture error... unknown for now */
-			nv50_fb_vm_trap(dev, display, name);
-			if (display) {
-				NV_ERROR(dev, "magic set %d:\n", i);
-				for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4)
-					NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
-						nv_rd32(dev, r));
-			}
-			break;
-		case 7: /* MP error */
-			if (ustatus & 0x00010000) {
-				nv50_pgraph_mp_trap(dev, i, display);
-				ustatus &= ~0x00010000;
-			}
-			break;
-		case 8: /* TPDMA error */
-			{
-			uint32_t e0c = nv_rd32(dev, ustatus_addr + 4);
-			uint32_t e10 = nv_rd32(dev, ustatus_addr + 8);
-			uint32_t e14 = nv_rd32(dev, ustatus_addr + 0xc);
-			uint32_t e18 = nv_rd32(dev, ustatus_addr + 0x10);
-			uint32_t e1c = nv_rd32(dev, ustatus_addr + 0x14);
-			uint32_t e20 = nv_rd32(dev, ustatus_addr + 0x18);
-			uint32_t e24 = nv_rd32(dev, ustatus_addr + 0x1c);
-			nv50_fb_vm_trap(dev, display, name);
-			/* 2d engine destination */
-			if (ustatus & 0x00000010) {
-				if (display) {
-					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - Unknown fault at address %02x%08x\n",
-							i, e14, e10);
-					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
-							i, e0c, e18, e1c, e20, e24);
-				}
-				ustatus &= ~0x00000010;
-			}
-			/* Render target */
-			if (ustatus & 0x00000040) {
-				if (display) {
-					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - Unknown fault at address %02x%08x\n",
-							i, e14, e10);
-					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
-							i, e0c, e18, e1c, e20, e24);
-				}
-				ustatus &= ~0x00000040;
-			}
-			/* CUDA memory: l[], g[] or stack. */
-			if (ustatus & 0x00000080) {
-				if (display) {
-					if (e18 & 0x80000000) {
-						/* g[] read fault? */
-						NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global read fault at address %02x%08x\n",
-								i, e14, e10 | ((e18 >> 24) & 0x1f));
-						e18 &= ~0x1f000000;
-					} else if (e18 & 0xc) {
-						/* g[] write fault? */
-						NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global write fault at address %02x%08x\n",
-								i, e14, e10 | ((e18 >> 7) & 0x1f));
-						e18 &= ~0x00000f80;
-					} else {
-						NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Unknown CUDA fault at address %02x%08x\n",
-								i, e14, e10);
-					}
-					NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
-							i, e0c, e18, e1c, e20, e24);
-				}
-				ustatus &= ~0x00000080;
-			}
-			}
-			break;
-		}
-		if (ustatus) {
-			if (display)
-				NV_INFO(dev, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus);
-		}
-		nv_wr32(dev, ustatus_addr, 0xc0000000);
-	}
-
-	if (!tps && display)
-		NV_INFO(dev, "%s - No TPs claiming errors?\n", name);
-}
-
-static void
-nv50_pgraph_trap_handler(struct drm_device *dev)
-{
-	struct nouveau_pgraph_trap trap;
-	uint32_t status = nv_rd32(dev, 0x400108);
-	uint32_t ustatus;
-	int display = nouveau_ratelimit();
-
-
-	if (!status && display) {
-		nouveau_graph_trap_info(dev, &trap);
-		nouveau_graph_dump_trap_info(dev, "PGRAPH_TRAP", &trap);
-		NV_INFO(dev, "PGRAPH_TRAP - no units reporting traps?\n");
-	}
-
-	/* DISPATCH: Relays commands to other units and handles NOTIFY,
-	 * COND, QUERY. If you get a trap from it, the command is still stuck
-	 * in DISPATCH and you need to do something about it. */
-	if (status & 0x001) {
-		ustatus = nv_rd32(dev, 0x400804) & 0x7fffffff;
-		if (!ustatus && display) {
-			NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - no ustatus?\n");
-		}
-
-		/* Known to be triggered by screwed up NOTIFY and COND... */
-		if (ustatus & 0x00000001) {
-			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_FAULT");
-			nv_wr32(dev, 0x400500, 0);
-			if (nv_rd32(dev, 0x400808) & 0x80000000) {
-				if (display) {
-					if (nouveau_graph_trapped_channel(dev, &trap.channel))
-						trap.channel = -1;
-					trap.class = nv_rd32(dev, 0x400814);
-					trap.mthd = nv_rd32(dev, 0x400808) & 0x1ffc;
-					trap.subc = (nv_rd32(dev, 0x400808) >> 16) & 0x7;
-					trap.data = nv_rd32(dev, 0x40080c);
-					trap.data2 = nv_rd32(dev, 0x400810);
-					nouveau_graph_dump_trap_info(dev,
-							"PGRAPH_TRAP_DISPATCH_FAULT", &trap);
-					NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - 400808: %08x\n", nv_rd32(dev, 0x400808));
-					NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - 400848: %08x\n", nv_rd32(dev, 0x400848));
-				}
-				nv_wr32(dev, 0x400808, 0);
-			} else if (display) {
-				NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - No stuck command?\n");
-			}
-			nv_wr32(dev, 0x4008e8, nv_rd32(dev, 0x4008e8) & 3);
-			nv_wr32(dev, 0x400848, 0);
-			ustatus &= ~0x00000001;
-		}
-		if (ustatus & 0x00000002) {
-			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_QUERY");
-			nv_wr32(dev, 0x400500, 0);
-			if (nv_rd32(dev, 0x40084c) & 0x80000000) {
-				if (display) {
-					if (nouveau_graph_trapped_channel(dev, &trap.channel))
-						trap.channel = -1;
-					trap.class = nv_rd32(dev, 0x400814);
-					trap.mthd = nv_rd32(dev, 0x40084c) & 0x1ffc;
-					trap.subc = (nv_rd32(dev, 0x40084c) >> 16) & 0x7;
-					trap.data = nv_rd32(dev, 0x40085c);
-					trap.data2 = 0;
-					nouveau_graph_dump_trap_info(dev,
-							"PGRAPH_TRAP_DISPATCH_QUERY", &trap);
-					NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_QUERY - 40084c: %08x\n", nv_rd32(dev, 0x40084c));
-				}
-				nv_wr32(dev, 0x40084c, 0);
-			} else if (display) {
-				NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_QUERY - No stuck command?\n");
-			}
-			ustatus &= ~0x00000002;
-		}
-		if (ustatus && display)
-			NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - Unhandled ustatus 0x%08x\n", ustatus);
-		nv_wr32(dev, 0x400804, 0xc0000000);
-		nv_wr32(dev, 0x400108, 0x001);
-		status &= ~0x001;
-	}
-
-	/* TRAPs other than dispatch use the "normal" trap regs. */
-	if (status && display) {
-		nouveau_graph_trap_info(dev, &trap);
-		nouveau_graph_dump_trap_info(dev,
-				"PGRAPH_TRAP", &trap);
-	}
-
-	/* M2MF: Memory to memory copy engine. */
-	if (status & 0x002) {
-		ustatus = nv_rd32(dev, 0x406800) & 0x7fffffff;
-		if (!ustatus && display) {
-			NV_INFO(dev, "PGRAPH_TRAP_M2MF - no ustatus?\n");
-		}
-		if (ustatus & 0x00000001) {
-			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_NOTIFY");
-			ustatus &= ~0x00000001;
-		}
-		if (ustatus & 0x00000002) {
-			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_IN");
-			ustatus &= ~0x00000002;
-		}
-		if (ustatus & 0x00000004) {
-			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_OUT");
-			ustatus &= ~0x00000004;
-		}
-		NV_INFO (dev, "PGRAPH_TRAP_M2MF - %08x %08x %08x %08x\n",
-				nv_rd32(dev, 0x406804),
-				nv_rd32(dev, 0x406808),
-				nv_rd32(dev, 0x40680c),
-				nv_rd32(dev, 0x406810));
-		if (ustatus && display)
-			NV_INFO(dev, "PGRAPH_TRAP_M2MF - Unhandled ustatus 0x%08x\n", ustatus);
-		/* No sane way found yet -- just reset the bugger. */
-		nv_wr32(dev, 0x400040, 2);
-		nv_wr32(dev, 0x400040, 0);
-		nv_wr32(dev, 0x406800, 0xc0000000);
-		nv_wr32(dev, 0x400108, 0x002);
-		status &= ~0x002;
-	}
-
-	/* VFETCH: Fetches data from vertex buffers. */
-	if (status & 0x004) {
-		ustatus = nv_rd32(dev, 0x400c04) & 0x7fffffff;
-		if (!ustatus && display) {
-			NV_INFO(dev, "PGRAPH_TRAP_VFETCH - no ustatus?\n");
-		}
-		if (ustatus & 0x00000001) {
-			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_VFETCH_FAULT");
-			NV_INFO (dev, "PGRAPH_TRAP_VFETCH_FAULT - %08x %08x %08x %08x\n",
-					nv_rd32(dev, 0x400c00),
-					nv_rd32(dev, 0x400c08),
-					nv_rd32(dev, 0x400c0c),
-					nv_rd32(dev, 0x400c10));
-			ustatus &= ~0x00000001;
-		}
-		if (ustatus && display)
-			NV_INFO(dev, "PGRAPH_TRAP_VFETCH - Unhandled ustatus 0x%08x\n", ustatus);
-		nv_wr32(dev, 0x400c04, 0xc0000000);
-		nv_wr32(dev, 0x400108, 0x004);
-		status &= ~0x004;
-	}
-
-	/* STRMOUT: DirectX streamout / OpenGL transform feedback. */
-	if (status & 0x008) {
-		ustatus = nv_rd32(dev, 0x401800) & 0x7fffffff;
-		if (!ustatus && display) {
-			NV_INFO(dev, "PGRAPH_TRAP_STRMOUT - no ustatus?\n");
-		}
-		if (ustatus & 0x00000001) {
-			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_STRMOUT_FAULT");
-			NV_INFO (dev, "PGRAPH_TRAP_STRMOUT_FAULT - %08x %08x %08x %08x\n",
-					nv_rd32(dev, 0x401804),
-					nv_rd32(dev, 0x401808),
-					nv_rd32(dev, 0x40180c),
-					nv_rd32(dev, 0x401810));
-			ustatus &= ~0x00000001;
-		}
-		if (ustatus && display)
-			NV_INFO(dev, "PGRAPH_TRAP_STRMOUT - Unhandled ustatus 0x%08x\n", ustatus);
-		/* No sane way found yet -- just reset the bugger. */
-		nv_wr32(dev, 0x400040, 0x80);
-		nv_wr32(dev, 0x400040, 0);
-		nv_wr32(dev, 0x401800, 0xc0000000);
-		nv_wr32(dev, 0x400108, 0x008);
-		status &= ~0x008;
-	}
-
-	/* CCACHE: Handles code and c[] caches and fills them. */
-	if (status & 0x010) {
-		ustatus = nv_rd32(dev, 0x405018) & 0x7fffffff;
-		if (!ustatus && display) {
-			NV_INFO(dev, "PGRAPH_TRAP_CCACHE - no ustatus?\n");
-		}
-		if (ustatus & 0x00000001) {
-			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_CCACHE_FAULT");
-			NV_INFO (dev, "PGRAPH_TRAP_CCACHE_FAULT - %08x %08x %08x %08x %08x %08x %08x\n",
-					nv_rd32(dev, 0x405800),
-					nv_rd32(dev, 0x405804),
-					nv_rd32(dev, 0x405808),
-					nv_rd32(dev, 0x40580c),
-					nv_rd32(dev, 0x405810),
-					nv_rd32(dev, 0x405814),
-					nv_rd32(dev, 0x40581c));
-			ustatus &= ~0x00000001;
-		}
-		if (ustatus && display)
-			NV_INFO(dev, "PGRAPH_TRAP_CCACHE - Unhandled ustatus 0x%08x\n", ustatus);
-		nv_wr32(dev, 0x405018, 0xc0000000);
-		nv_wr32(dev, 0x400108, 0x010);
-		status &= ~0x010;
-	}
-
-	/* Unknown, not seen yet... 0x402000 is the only trap status reg
-	 * remaining, so try to handle it anyway. Perhaps related to that
-	 * unknown DMA slot on tesla? */
-	if (status & 0x20) {
-		nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_UNKC04");
-		ustatus = nv_rd32(dev, 0x402000) & 0x7fffffff;
-		if (display)
-			NV_INFO(dev, "PGRAPH_TRAP_UNKC04 - Unhandled ustatus 0x%08x\n", ustatus);
-		nv_wr32(dev, 0x402000, 0xc0000000);
-		/* no status modification on purpose */
-	}
-
-	/* TEXTURE: CUDA texturing units */
-	if (status & 0x040) {
-		nv50_pgraph_tp_trap (dev, 6, 0x408900, 0x408600, display,
-				"PGRAPH_TRAP_TEXTURE");
-		nv_wr32(dev, 0x400108, 0x040);
-		status &= ~0x040;
-	}
-
-	/* MP: CUDA execution engines. */
-	if (status & 0x080) {
-		nv50_pgraph_tp_trap (dev, 7, 0x408314, 0x40831c, display,
-				"PGRAPH_TRAP_MP");
-		nv_wr32(dev, 0x400108, 0x080);
-		status &= ~0x080;
-	}
-
-	/* TPDMA:  Handles TP-initiated uncached memory accesses:
-	 * l[], g[], stack, 2d surfaces, render targets. */
-	if (status & 0x100) {
-		nv50_pgraph_tp_trap (dev, 8, 0x408e08, 0x408708, display,
-				"PGRAPH_TRAP_TPDMA");
-		nv_wr32(dev, 0x400108, 0x100);
-		status &= ~0x100;
-	}
-
-	if (status) {
-		if (display)
-			NV_INFO(dev, "PGRAPH_TRAP - Unknown trap 0x%08x\n",
-				status);
-		nv_wr32(dev, 0x400108, status);
-	}
-}
-
-/* There must be a *lot* of these. Will take some time to gather them up. */
-static struct nouveau_enum_names nv50_data_error_names[] =
-{
-	{ 4,	"INVALID_VALUE" },
-	{ 5,	"INVALID_ENUM" },
-	{ 8,	"INVALID_OBJECT" },
-	{ 0xc,	"INVALID_BITFIELD" },
-	{ 0x28,	"MP_NO_REG_SPACE" },
-	{ 0x2b,	"MP_BLOCK_SIZE_MISMATCH" },
-};
-
-static void
-nv50_pgraph_irq_handler(struct drm_device *dev)
-{
-	struct nouveau_pgraph_trap trap;
-	int unhandled = 0;
-	uint32_t status;
-
-	while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) {
-		/* NOTIFY: You've set a NOTIFY on a command and it's done. */
-		if (status & 0x00000001) {
-			nouveau_graph_trap_info(dev, &trap);
-			if (nouveau_ratelimit())
-				nouveau_graph_dump_trap_info(dev,
-						"PGRAPH_NOTIFY", &trap);
-			status &= ~0x00000001;
-			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001);
-		}
-
-		/* COMPUTE_QUERY: Purpose and exact cause unknown, happens
-		 * when you write 0x200 to 0x50c0 method 0x31c. */
-		if (status & 0x00000002) {
-			nouveau_graph_trap_info(dev, &trap);
-			if (nouveau_ratelimit())
-				nouveau_graph_dump_trap_info(dev,
-						"PGRAPH_COMPUTE_QUERY", &trap);
-			status &= ~0x00000002;
-			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000002);
-		}
-
-		/* Unknown, never seen: 0x4 */
-
-		/* ILLEGAL_MTHD: You used a wrong method for this class. */
-		if (status & 0x00000010) {
-			nouveau_graph_trap_info(dev, &trap);
-			if (nouveau_pgraph_intr_swmthd(dev, &trap))
-				unhandled = 1;
-			if (unhandled && nouveau_ratelimit())
-				nouveau_graph_dump_trap_info(dev,
-						"PGRAPH_ILLEGAL_MTHD", &trap);
-			status &= ~0x00000010;
-			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010);
-		}
-
-		/* ILLEGAL_CLASS: You used a wrong class. */
-		if (status & 0x00000020) {
-			nouveau_graph_trap_info(dev, &trap);
-			if (nouveau_ratelimit())
-				nouveau_graph_dump_trap_info(dev,
-						"PGRAPH_ILLEGAL_CLASS", &trap);
-			status &= ~0x00000020;
-			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000020);
-		}
-
-		/* DOUBLE_NOTIFY: You tried to set a NOTIFY on another NOTIFY. */
-		if (status & 0x00000040) {
-			nouveau_graph_trap_info(dev, &trap);
-			if (nouveau_ratelimit())
-				nouveau_graph_dump_trap_info(dev,
-						"PGRAPH_DOUBLE_NOTIFY", &trap);
-			status &= ~0x00000040;
-			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000040);
-		}
-
-		/* CONTEXT_SWITCH: PGRAPH needs us to load a new context */
-		if (status & 0x00001000) {
-			nv_wr32(dev, 0x400500, 0x00000000);
-			nv_wr32(dev, NV03_PGRAPH_INTR,
-				NV_PGRAPH_INTR_CONTEXT_SWITCH);
-			nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev,
-				NV40_PGRAPH_INTR_EN) &
-				~NV_PGRAPH_INTR_CONTEXT_SWITCH);
-			nv_wr32(dev, 0x400500, 0x00010001);
-
-			nv50_graph_context_switch(dev);
-
-			status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
-		}
-
-		/* BUFFER_NOTIFY: Your m2mf transfer finished */
-		if (status & 0x00010000) {
-			nouveau_graph_trap_info(dev, &trap);
-			if (nouveau_ratelimit())
-				nouveau_graph_dump_trap_info(dev,
-						"PGRAPH_BUFFER_NOTIFY", &trap);
-			status &= ~0x00010000;
-			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00010000);
-		}
-
-		/* DATA_ERROR: Invalid value for this method, or invalid
-		 * state in current PGRAPH context for this operation */
-		if (status & 0x00100000) {
-			nouveau_graph_trap_info(dev, &trap);
-			if (nouveau_ratelimit()) {
-				nouveau_graph_dump_trap_info(dev,
-						"PGRAPH_DATA_ERROR", &trap);
-				NV_INFO (dev, "PGRAPH_DATA_ERROR - ");
-				nouveau_print_enum_names(nv_rd32(dev, 0x400110),
-						nv50_data_error_names);
-				printk("\n");
-			}
-			status &= ~0x00100000;
-			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000);
-		}
-
-		/* TRAP: Something bad happened in the middle of command
-		 * execution.  Has a billion types, subtypes, and even
-		 * subsubtypes. */
-		if (status & 0x00200000) {
-			nv50_pgraph_trap_handler(dev);
-			status &= ~0x00200000;
-			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000);
-		}
-
-		/* Unknown, never seen: 0x00400000 */
-
-		/* SINGLE_STEP: Happens on every method if you turned on
-		 * single stepping in 40008c */
-		if (status & 0x01000000) {
-			nouveau_graph_trap_info(dev, &trap);
-			if (nouveau_ratelimit())
-				nouveau_graph_dump_trap_info(dev,
-						"PGRAPH_SINGLE_STEP", &trap);
-			status &= ~0x01000000;
-			nv_wr32(dev, NV03_PGRAPH_INTR, 0x01000000);
-		}
-
-		/* 0x02000000 happens when you pause a ctxprog...
-		 * but the only way this can happen that I know is by
-		 * poking the relevant MMIO register, and we don't
-		 * do that. */
-
-		if (status) {
-			NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n",
-				status);
-			nv_wr32(dev, NV03_PGRAPH_INTR, status);
-		}
-
-		{
-			const int isb = (1 << 16) | (1 << 0);
-
-			if ((nv_rd32(dev, 0x400500) & isb) != isb)
-				nv_wr32(dev, 0x400500,
-					nv_rd32(dev, 0x400500) | isb);
-		}
-	}
-
-	nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
-	if (nv_rd32(dev, 0x400824) & (1 << 31))
-		nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31));
-}
-
-static void
-nouveau_crtc_irq_handler(struct drm_device *dev, int crtc)
-{
-	if (crtc & 1)
-		nv_wr32(dev, NV_CRTC0_INTSTAT, NV_CRTC_INTR_VBLANK);
-
-	if (crtc & 2)
-		nv_wr32(dev, NV_CRTC1_INTSTAT, NV_CRTC_INTR_VBLANK);
-}
-
 irqreturn_t
 nouveau_irq_handler(DRM_IRQ_ARGS)
 {
 	struct drm_device *dev = (struct drm_device *)arg;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	uint32_t status;
 	unsigned long flags;
+	u32 stat;
+	int i;
 
-	status = nv_rd32(dev, NV03_PMC_INTR_0);
-	if (!status)
+	stat = nv_rd32(dev, NV03_PMC_INTR_0);
+	if (!stat)
 		return IRQ_NONE;
 
 	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+	for (i = 0; i < 32 && stat; i++) {
+		if (!(stat & (1 << i)) || !dev_priv->irq_handler[i])
+			continue;
 
-	if (status & NV_PMC_INTR_0_PFIFO_PENDING) {
-		nouveau_fifo_irq_handler(dev);
-		status &= ~NV_PMC_INTR_0_PFIFO_PENDING;
+		dev_priv->irq_handler[i](dev);
+		stat &= ~(1 << i);
 	}
 
-	if (status & NV_PMC_INTR_0_PGRAPH_PENDING) {
-		if (dev_priv->card_type >= NV_50)
-			nv50_pgraph_irq_handler(dev);
-		else
-			nouveau_pgraph_irq_handler(dev);
-
-		status &= ~NV_PMC_INTR_0_PGRAPH_PENDING;
-	}
-
-	if (status & NV_PMC_INTR_0_CRTCn_PENDING) {
-		nouveau_crtc_irq_handler(dev, (status>>24)&3);
-		status &= ~NV_PMC_INTR_0_CRTCn_PENDING;
-	}
-
-	if (status & (NV_PMC_INTR_0_NV50_DISPLAY_PENDING |
-		      NV_PMC_INTR_0_NV50_I2C_PENDING)) {
-		nv50_display_irq_handler(dev);
-		status &= ~(NV_PMC_INTR_0_NV50_DISPLAY_PENDING |
-			    NV_PMC_INTR_0_NV50_I2C_PENDING);
-	}
-
-	if (status)
-		NV_ERROR(dev, "Unhandled PMC INTR status bits 0x%08x\n", status);
-
+	if (dev_priv->msi_enabled)
+		nv_wr08(dev, 0x00088068, 0xff);
 	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
 
+	if (stat && nouveau_ratelimit())
+		NV_ERROR(dev, "PMC - unhandled INTR 0x%08x\n", stat);
 	return IRQ_HANDLED;
 }
+
+int
+nouveau_irq_init(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	int ret;
+
+	if (nouveau_msi != 0 && dev_priv->card_type >= NV_50) {
+		ret = pci_enable_msi(dev->pdev);
+		if (ret == 0) {
+			NV_INFO(dev, "enabled MSI\n");
+			dev_priv->msi_enabled = true;
+		}
+	}
+
+	return drm_irq_install(dev);
+}
+
+void
+nouveau_irq_fini(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	drm_irq_uninstall(dev);
+	if (dev_priv->msi_enabled)
+		pci_disable_msi(dev->pdev);
+}
+
+void
+nouveau_irq_register(struct drm_device *dev, int status_bit,
+		     void (*handler)(struct drm_device *))
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+	dev_priv->irq_handler[status_bit] = handler;
+	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+}
+
+void
+nouveau_irq_unregister(struct drm_device *dev, int status_bit)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+	dev_priv->irq_handler[status_bit] = NULL;
+	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+}
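
/*
 * A minimal standalone sketch of the dispatch pattern introduced above:
 * a 32-entry handler table indexed by NV03_PMC_INTR_0 status bit, filled
 * in via nouveau_irq_register().  Everything below (names, bit numbers)
 * is illustrative only, not the driver's actual code.
 */
#include <stdio.h>
#include <stdint.h>

#define NBITS 32

typedef void (*irq_handler_t)(void *dev);

static irq_handler_t handlers[NBITS];

static void irq_register(int bit, irq_handler_t fn) { handlers[bit] = fn; }
static void irq_unregister(int bit) { handlers[bit] = NULL; }

/* Walk the status word, calling the handler for every set bit that has
 * one registered; whatever remains set afterwards is unhandled and gets
 * reported by the caller. */
static uint32_t irq_dispatch(void *dev, uint32_t stat)
{
	int i;

	for (i = 0; i < NBITS && stat; i++) {
		if (!(stat & (1u << i)) || !handlers[i])
			continue;
		handlers[i](dev);
		stat &= ~(1u << i);
	}
	return stat;
}

static void fifo_handler(void *dev) { printf("PFIFO interrupt\n"); }
static void graph_handler(void *dev) { printf("PGRAPH interrupt\n"); }

int main(void)
{
	uint32_t left;

	irq_register(8, fifo_handler);   /* hypothetical PFIFO status bit */
	irq_register(12, graph_handler); /* hypothetical PGRAPH status bit */

	left = irq_dispatch(NULL, (1u << 8) | (1u << 12) | (1u << 20));
	printf("unhandled: 0x%08x\n", (unsigned)left); /* bit 20: no handler */

	irq_unregister(8);
	return 0;
}
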
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index fe4a30d..2241811 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -36,183 +36,112 @@
 
 #include "nouveau_drv.h"
 #include "nouveau_pm.h"
+#include "nouveau_mm.h"
+#include "nouveau_vm.h"
 
 /*
  * NV10-NV40 tiling helpers
  */
 
 static void
-nv10_mem_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
-			   uint32_t size, uint32_t pitch)
+nv10_mem_update_tile_region(struct drm_device *dev,
+			    struct nouveau_tile_reg *tile, uint32_t addr,
+			    uint32_t size, uint32_t pitch, uint32_t flags)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
 	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
 	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
-	struct nouveau_tile_reg *tile = &dev_priv->tile[i];
+	int i = tile - dev_priv->tile.reg;
+	unsigned long save;
 
-	tile->addr = addr;
-	tile->size = size;
-	tile->used = !!pitch;
-	nouveau_fence_unref((void **)&tile->fence);
+	nouveau_fence_unref(&tile->fence);
 
+	if (tile->pitch)
+		pfb->free_tile_region(dev, i);
+
+	if (pitch)
+		pfb->init_tile_region(dev, i, addr, size, pitch, flags);
+
+	spin_lock_irqsave(&dev_priv->context_switch_lock, save);
 	pfifo->reassign(dev, false);
 	pfifo->cache_pull(dev, false);
 
 	nouveau_wait_for_idle(dev);
 
-	pgraph->set_region_tiling(dev, i, addr, size, pitch);
-	pfb->set_region_tiling(dev, i, addr, size, pitch);
+	pfb->set_tile_region(dev, i);
+	pgraph->set_tile_region(dev, i);
 
 	pfifo->cache_pull(dev, true);
 	pfifo->reassign(dev, true);
+	spin_unlock_irqrestore(&dev_priv->context_switch_lock, save);
+}
+
+static struct nouveau_tile_reg *
+nv10_mem_get_tile_region(struct drm_device *dev, int i)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
+
+	spin_lock(&dev_priv->tile.lock);
+
+	if (!tile->used &&
+	    (!tile->fence || nouveau_fence_signalled(tile->fence)))
+		tile->used = true;
+	else
+		tile = NULL;
+
+	spin_unlock(&dev_priv->tile.lock);
+	return tile;
+}
+
+void
+nv10_mem_put_tile_region(struct drm_device *dev, struct nouveau_tile_reg *tile,
+			 struct nouveau_fence *fence)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	if (tile) {
+		spin_lock(&dev_priv->tile.lock);
+		if (fence) {
+			/* Mark it as pending. */
+			tile->fence = fence;
+			nouveau_fence_ref(fence);
+		}
+
+		tile->used = false;
+		spin_unlock(&dev_priv->tile.lock);
+	}
 }
 
 struct nouveau_tile_reg *
 nv10_mem_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size,
-		    uint32_t pitch)
+		    uint32_t pitch, uint32_t flags)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
-	struct nouveau_tile_reg *found = NULL;
-	unsigned long i, flags;
-
-	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
-
-	for (i = 0; i < pfb->num_tiles; i++) {
-		struct nouveau_tile_reg *tile = &dev_priv->tile[i];
-
-		if (tile->used)
-			/* Tile region in use. */
-			continue;
-
-		if (tile->fence &&
-		    !nouveau_fence_signalled(tile->fence, NULL))
-			/* Pending tile region. */
-			continue;
-
-		if (max(tile->addr, addr) <
-		    min(tile->addr + tile->size, addr + size))
-			/* Kill an intersecting tile region. */
-			nv10_mem_set_region_tiling(dev, i, 0, 0, 0);
-
-		if (pitch && !found) {
-			/* Free tile region. */
-			nv10_mem_set_region_tiling(dev, i, addr, size, pitch);
-			found = tile;
-		}
-	}
-
-	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
-
-	return found;
-}
-
-void
-nv10_mem_expire_tiling(struct drm_device *dev, struct nouveau_tile_reg *tile,
-		       struct nouveau_fence *fence)
-{
-	if (fence) {
-		/* Mark it as pending. */
-		tile->fence = fence;
-		nouveau_fence_ref(fence);
-	}
-
-	tile->used = false;
-}
-
-/*
- * NV50 VM helpers
- */
-int
-nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size,
-			uint32_t flags, uint64_t phys)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_gpuobj *pgt;
-	unsigned block;
+	struct nouveau_tile_reg *tile, *found = NULL;
 	int i;
 
-	virt = ((virt - dev_priv->vm_vram_base) >> 16) << 1;
-	size = (size >> 16) << 1;
+	for (i = 0; i < pfb->num_tiles; i++) {
+		tile = nv10_mem_get_tile_region(dev, i);
 
-	phys |= ((uint64_t)flags << 32);
-	phys |= 1;
-	if (dev_priv->vram_sys_base) {
-		phys += dev_priv->vram_sys_base;
-		phys |= 0x30;
+		if (pitch && !found) {
+			found = tile;
+			continue;
+
+		} else if (tile && tile->pitch) {
+			/* Kill an unused tile region. */
+			nv10_mem_update_tile_region(dev, tile, 0, 0, 0, 0);
+		}
+
+		nv10_mem_put_tile_region(dev, tile, NULL);
 	}
 
-	while (size) {
-		unsigned offset_h = upper_32_bits(phys);
-		unsigned offset_l = lower_32_bits(phys);
-		unsigned pte, end;
-
-		for (i = 7; i >= 0; i--) {
-			block = 1 << (i + 1);
-			if (size >= block && !(virt & (block - 1)))
-				break;
-		}
-		offset_l |= (i << 7);
-
-		phys += block << 15;
-		size -= block;
-
-		while (block) {
-			pgt = dev_priv->vm_vram_pt[virt >> 14];
-			pte = virt & 0x3ffe;
-
-			end = pte + block;
-			if (end > 16384)
-				end = 16384;
-			block -= (end - pte);
-			virt  += (end - pte);
-
-			while (pte < end) {
-				nv_wo32(pgt, (pte * 4) + 0, offset_l);
-				nv_wo32(pgt, (pte * 4) + 4, offset_h);
-				pte += 2;
-			}
-		}
-	}
-
-	dev_priv->engine.instmem.flush(dev);
-	dev_priv->engine.fifo.tlb_flush(dev);
-	dev_priv->engine.graph.tlb_flush(dev);
-	nv50_vm_flush(dev, 6);
-	return 0;
-}
-
-void
-nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_gpuobj *pgt;
-	unsigned pages, pte, end;
-
-	virt -= dev_priv->vm_vram_base;
-	pages = (size >> 16) << 1;
-
-	while (pages) {
-		pgt = dev_priv->vm_vram_pt[virt >> 29];
-		pte = (virt & 0x1ffe0000ULL) >> 15;
-
-		end = pte + pages;
-		if (end > 16384)
-			end = 16384;
-		pages -= (end - pte);
-		virt  += (end - pte) << 15;
-
-		while (pte < end) {
-			nv_wo32(pgt, (pte * 4), 0);
-			pte++;
-		}
-	}
-
-	dev_priv->engine.instmem.flush(dev);
-	dev_priv->engine.fifo.tlb_flush(dev);
-	dev_priv->engine.graph.tlb_flush(dev);
-	nv50_vm_flush(dev, 6);
+	if (found)
+		nv10_mem_update_tile_region(dev, found, addr, size,
+					    pitch, flags);
+	return found;
 }
 
 /*
@@ -312,62 +241,7 @@
 	return 0;
 }
 
-static void
-nv50_vram_preinit(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	int i, parts, colbits, rowbitsa, rowbitsb, banks;
-	u64 rowsize, predicted;
-	u32 r0, r4, rt, ru;
-
-	r0 = nv_rd32(dev, 0x100200);
-	r4 = nv_rd32(dev, 0x100204);
-	rt = nv_rd32(dev, 0x100250);
-	ru = nv_rd32(dev, 0x001540);
-	NV_DEBUG(dev, "memcfg 0x%08x 0x%08x 0x%08x 0x%08x\n", r0, r4, rt, ru);
-
-	for (i = 0, parts = 0; i < 8; i++) {
-		if (ru & (0x00010000 << i))
-			parts++;
-	}
-
-	colbits  =  (r4 & 0x0000f000) >> 12;
-	rowbitsa = ((r4 & 0x000f0000) >> 16) + 8;
-	rowbitsb = ((r4 & 0x00f00000) >> 20) + 8;
-	banks    = ((r4 & 0x01000000) ? 8 : 4);
-
-	rowsize = parts * banks * (1 << colbits) * 8;
-	predicted = rowsize << rowbitsa;
-	if (r0 & 0x00000004)
-		predicted += rowsize << rowbitsb;
-
-	if (predicted != dev_priv->vram_size) {
-		NV_WARN(dev, "memory controller reports %dMiB VRAM\n",
-			(u32)(dev_priv->vram_size >> 20));
-		NV_WARN(dev, "we calculated %dMiB VRAM\n",
-			(u32)(predicted >> 20));
-	}
-
-	dev_priv->vram_rblock_size = rowsize >> 12;
-	if (rt & 1)
-		dev_priv->vram_rblock_size *= 3;
-
-	NV_DEBUG(dev, "rblock %lld bytes\n",
-		 (u64)dev_priv->vram_rblock_size << 12);
-}
-
-static void
-nvaa_vram_preinit(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-
-	/* To our knowledge, there's no large scale reordering of pages
-	 * that occurs on IGP chipsets.
-	 */
-	dev_priv->vram_rblock_size = 1;
-}
-
-static int
+int
 nouveau_mem_detect(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -381,40 +255,25 @@
 	if (dev_priv->card_type < NV_50) {
 		dev_priv->vram_size  = nv_rd32(dev, NV04_PFB_FIFO_DATA);
 		dev_priv->vram_size &= NV10_PFB_FIFO_DATA_RAM_AMOUNT_MB_MASK;
-	} else
-	if (dev_priv->card_type < NV_C0) {
-		dev_priv->vram_size = nv_rd32(dev, NV04_PFB_FIFO_DATA);
-		dev_priv->vram_size |= (dev_priv->vram_size & 0xff) << 32;
-		dev_priv->vram_size &= 0xffffffff00ll;
-
-		switch (dev_priv->chipset) {
-		case 0xaa:
-		case 0xac:
-		case 0xaf:
-			dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10);
-			dev_priv->vram_sys_base <<= 12;
-			nvaa_vram_preinit(dev);
-			break;
-		default:
-			nv50_vram_preinit(dev);
-			break;
-		}
 	} else {
 		dev_priv->vram_size  = nv_rd32(dev, 0x10f20c) << 20;
 		dev_priv->vram_size *= nv_rd32(dev, 0x121c74);
 	}
 
-	NV_INFO(dev, "Detected %dMiB VRAM\n", (int)(dev_priv->vram_size >> 20));
-	if (dev_priv->vram_sys_base) {
-		NV_INFO(dev, "Stolen system memory at: 0x%010llx\n",
-			dev_priv->vram_sys_base);
-	}
-
 	if (dev_priv->vram_size)
 		return 0;
 	return -ENOMEM;
 }
 
+bool
+nouveau_mem_flags_valid(struct drm_device *dev, u32 tile_flags)
+{
+	if (!(tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK))
+		return true;
+
+	return false;
+}
+
 #if __OS_HAS_AGP
 static unsigned long
 get_agp_mode(struct drm_device *dev, unsigned long mode)
@@ -547,10 +406,6 @@
 	if (ret)
 		return ret;
 
-	ret = nouveau_mem_detect(dev);
-	if (ret)
-		return ret;
-
 	dev_priv->fb_phys = pci_resource_start(dev->pdev, 1);
 
 	ret = nouveau_ttm_global_init(dev_priv);
@@ -566,13 +421,6 @@
 		return ret;
 	}
 
-	dev_priv->fb_available_size = dev_priv->vram_size;
-	dev_priv->fb_mappable_pages = dev_priv->fb_available_size;
-	if (dev_priv->fb_mappable_pages > pci_resource_len(dev->pdev, 1))
-		dev_priv->fb_mappable_pages =
-			pci_resource_len(dev->pdev, 1);
-	dev_priv->fb_mappable_pages >>= PAGE_SHIFT;
-
 	/* reserve space at end of VRAM for PRAMIN */
 	if (dev_priv->chipset == 0x40 || dev_priv->chipset == 0x47 ||
 	    dev_priv->chipset == 0x49 || dev_priv->chipset == 0x4b)
@@ -583,6 +431,22 @@
 	else
 		dev_priv->ramin_rsvd_vram = (512 * 1024);
 
+	ret = dev_priv->engine.vram.init(dev);
+	if (ret)
+		return ret;
+
+	NV_INFO(dev, "Detected %dMiB VRAM\n", (int)(dev_priv->vram_size >> 20));
+	if (dev_priv->vram_sys_base) {
+		NV_INFO(dev, "Stolen system memory at: 0x%010llx\n",
+			dev_priv->vram_sys_base);
+	}
+
+	dev_priv->fb_available_size = dev_priv->vram_size;
+	dev_priv->fb_mappable_pages = dev_priv->fb_available_size;
+	if (dev_priv->fb_mappable_pages > pci_resource_len(dev->pdev, 1))
+		dev_priv->fb_mappable_pages = pci_resource_len(dev->pdev, 1);
+	dev_priv->fb_mappable_pages >>= PAGE_SHIFT;
+
 	dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram;
 	dev_priv->fb_aper_free = dev_priv->fb_available_size;
 
@@ -799,3 +663,114 @@
 
 	kfree(mem->timing);
 }
+
+static int
+nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long p_size)
+{
+	struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
+	struct nouveau_mm *mm;
+	u32 b_size;
+	int ret;
+
+	p_size = (p_size << PAGE_SHIFT) >> 12;
+	b_size = dev_priv->vram_rblock_size >> 12;
+
+	ret = nouveau_mm_init(&mm, 0, p_size, b_size);
+	if (ret)
+		return ret;
+
+	man->priv = mm;
+	return 0;
+}
+
+static int
+nouveau_vram_manager_fini(struct ttm_mem_type_manager *man)
+{
+	struct nouveau_mm *mm = man->priv;
+	int ret;
+
+	ret = nouveau_mm_fini(&mm);
+	if (ret)
+		return ret;
+
+	man->priv = NULL;
+	return 0;
+}
+
+static void
+nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
+			 struct ttm_mem_reg *mem)
+{
+	struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
+	struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
+	struct drm_device *dev = dev_priv->dev;
+
+	vram->put(dev, (struct nouveau_vram **)&mem->mm_node);
+}
+
+static int
+nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
+			 struct ttm_buffer_object *bo,
+			 struct ttm_placement *placement,
+			 struct ttm_mem_reg *mem)
+{
+	struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
+	struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
+	struct drm_device *dev = dev_priv->dev;
+	struct nouveau_bo *nvbo = nouveau_bo(bo);
+	struct nouveau_vram *node;
+	u32 size_nc = 0;
+	int ret;
+
+	if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
+		size_nc = 1 << nvbo->vma.node->type;
+
+	ret = vram->get(dev, mem->num_pages << PAGE_SHIFT,
+			mem->page_alignment << PAGE_SHIFT, size_nc,
+			(nvbo->tile_flags >> 8) & 0xff, &node);
+	if (ret)
+		return ret;
+
+	mem->mm_node = node;
+	mem->start   = node->offset >> PAGE_SHIFT;
+	return 0;
+}
+
+void
+nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
+{
+	struct nouveau_mm *mm = man->priv;
+	struct nouveau_mm_node *r;
+	u64 total = 0, ttotal[3] = {}, tused[3] = {}, tfree[3] = {};
+	int i;
+
+	mutex_lock(&mm->mutex);
+	list_for_each_entry(r, &mm->nodes, nl_entry) {
+		printk(KERN_DEBUG "%s %s-%d: 0x%010llx 0x%010llx\n",
+		       prefix, r->free ? "free" : "used", r->type,
+		       ((u64)r->offset << 12),
+		       (((u64)r->offset + r->length) << 12));
+		total += r->length;
+		ttotal[r->type] += r->length;
+		if (r->free)
+			tfree[r->type] += r->length;
+		else
+			tused[r->type] += r->length;
+	}
+	mutex_unlock(&mm->mutex);
+
+	printk(KERN_DEBUG "%s  total: 0x%010llx\n", prefix, total << 12);
+	for (i = 0; i < 3; i++) {
+		printk(KERN_DEBUG "%s type %d: 0x%010llx, "
+				  "used 0x%010llx, free 0x%010llx\n", prefix,
+		       i, ttotal[i] << 12, tused[i] << 12, tfree[i] << 12);
+	}
+}
+
+const struct ttm_mem_type_manager_func nouveau_vram_manager = {
+	nouveau_vram_manager_init,
+	nouveau_vram_manager_fini,
+	nouveau_vram_manager_new,
+	nouveau_vram_manager_del,
+	nouveau_vram_manager_debug
+};
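
/*
 * A toy model of the tile-region lifecycle used above: put() may attach
 * a fence, and get() refuses to hand a region out again until it is both
 * unused and its fence (if any) has signalled.  The fence is modelled as
 * a plain flag; none of these names are the driver's.
 */
#include <stdbool.h>
#include <stdio.h>

struct tile_reg {
	bool used;
	bool *fence; /* NULL, or points at a "signalled" flag */
};

static struct tile_reg *tile_get(struct tile_reg *tile)
{
	/* reusable only when idle and not pending on a fence */
	if (!tile->used && (!tile->fence || *tile->fence)) {
		tile->fence = NULL;
		tile->used = true;
		return tile;
	}
	return NULL;
}

static void tile_put(struct tile_reg *tile, bool *fence)
{
	tile->fence = fence; /* defer reuse until *fence becomes true */
	tile->used = false;
}

int main(void)
{
	struct tile_reg reg = { false, NULL };
	bool signalled = false;

	tile_get(&reg);             /* acquire */
	tile_put(&reg, &signalled); /* release while the GPU may still use it */

	printf("reusable: %s\n", tile_get(&reg) ? "yes" : "no"); /* no */
	signalled = true;                                        /* fence fires */
	printf("reusable: %s\n", tile_get(&reg) ? "yes" : "no"); /* yes */
	return 0;
}
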
diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.c b/drivers/gpu/drm/nouveau/nouveau_mm.c
new file mode 100644
index 0000000..cdbb11e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_mm.c
@@ -0,0 +1,271 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_mm.h"
+
+static inline void
+region_put(struct nouveau_mm *rmm, struct nouveau_mm_node *a)
+{
+	list_del(&a->nl_entry);
+	list_del(&a->fl_entry);
+	kfree(a);
+}
+
+static struct nouveau_mm_node *
+region_split(struct nouveau_mm *rmm, struct nouveau_mm_node *a, u32 size)
+{
+	struct nouveau_mm_node *b;
+
+	if (a->length == size)
+		return a;
+
+	b = kmalloc(sizeof(*b), GFP_KERNEL);
+	if (unlikely(b == NULL))
+		return NULL;
+
+	b->offset = a->offset;
+	b->length = size;
+	b->free   = a->free;
+	b->type   = a->type;
+	a->offset += size;
+	a->length -= size;
+	list_add_tail(&b->nl_entry, &a->nl_entry);
+	if (b->free)
+		list_add_tail(&b->fl_entry, &a->fl_entry);
+	return b;
+}
+
+static struct nouveau_mm_node *
+nouveau_mm_merge(struct nouveau_mm *rmm, struct nouveau_mm_node *this)
+{
+	struct nouveau_mm_node *prev, *next;
+
+	/* try to merge with free adjacent entries of same type */
+	prev = list_entry(this->nl_entry.prev, struct nouveau_mm_node, nl_entry);
+	if (this->nl_entry.prev != &rmm->nodes) {
+		if (prev->free && prev->type == this->type) {
+			prev->length += this->length;
+			region_put(rmm, this);
+			this = prev;
+		}
+	}
+
+	next = list_entry(this->nl_entry.next, struct nouveau_mm_node, nl_entry);
+	if (this->nl_entry.next != &rmm->nodes) {
+		if (next->free && next->type == this->type) {
+			next->offset  = this->offset;
+			next->length += this->length;
+			region_put(rmm, this);
+			this = next;
+		}
+	}
+
+	return this;
+}
+
+void
+nouveau_mm_put(struct nouveau_mm *rmm, struct nouveau_mm_node *this)
+{
+	u32 block_s, block_l;
+
+	this->free = true;
+	list_add(&this->fl_entry, &rmm->free);
+	this = nouveau_mm_merge(rmm, this);
+
+	/* any entirely free blocks now?  we'll want to remove typing
+	 * on them now so they can be used for any memory allocation
+	 */
+	block_s = roundup(this->offset, rmm->block_size);
+	if (block_s + rmm->block_size > this->offset + this->length)
+		return;
+
+	/* split off any still-typed region at the start */
+	if (block_s != this->offset) {
+		if (!region_split(rmm, this, block_s - this->offset))
+			return;
+	}
+
+	/* split off the soon-to-be-untyped block(s) */
+	block_l = rounddown(this->length, rmm->block_size);
+	if (block_l != this->length) {
+		this = region_split(rmm, this, block_l);
+		if (!this)
+			return;
+	}
+
+	/* mark as having no type, and retry merge with any adjacent
+	 * untyped blocks
+	 */
+	this->type = 0;
+	nouveau_mm_merge(rmm, this);
+}
+
+int
+nouveau_mm_get(struct nouveau_mm *rmm, int type, u32 size, u32 size_nc,
+	       u32 align, struct nouveau_mm_node **pnode)
+{
+	struct nouveau_mm_node *this, *tmp, *next;
+	u32 splitoff, avail, alloc;
+
+	list_for_each_entry_safe(this, tmp, &rmm->free, fl_entry) {
+		next = list_entry(this->nl_entry.next, struct nouveau_mm_node, nl_entry);
+		if (this->nl_entry.next == &rmm->nodes)
+			next = NULL;
+
+		/* skip wrongly typed blocks */
+		if (this->type && this->type != type)
+			continue;
+
+		/* account for alignment */
+		splitoff = this->offset & (align - 1);
+		if (splitoff)
+			splitoff = align - splitoff;
+
+		if (this->length <= splitoff)
+			continue;
+
+		/* determine total memory available from this, and
+		 * the next block (if appropriate)
+		 */
+		avail = this->length;
+		if (next && next->free && (!next->type || next->type == type))
+			avail += next->length;
+
+		avail -= splitoff;
+
+		/* determine allocation size */
+		if (size_nc) {
+			alloc = min(avail, size);
+			alloc = rounddown(alloc, size_nc);
+			if (alloc == 0)
+				continue;
+		} else {
+			alloc = size;
+			if (avail < alloc)
+				continue;
+		}
+
+		/* untyped block, split off a chunk that's a multiple
+		 * of block_size and type it
+		 */
+		if (!this->type) {
+			u32 block = roundup(alloc + splitoff, rmm->block_size);
+			if (this->length < block)
+				continue;
+
+			this = region_split(rmm, this, block);
+			if (!this)
+				return -ENOMEM;
+
+			this->type = type;
+		}
+
+		/* stealing memory from adjacent block */
+		if (alloc > this->length) {
+			u32 amount = alloc - (this->length - splitoff);
+
+			if (!next->type) {
+				amount = roundup(amount, rmm->block_size);
+
+				next = region_split(rmm, next, amount);
+				if (!next)
+					return -ENOMEM;
+
+				next->type = type;
+			}
+
+			this->length += amount;
+			next->offset += amount;
+			next->length -= amount;
+			if (!next->length) {
+				list_del(&next->nl_entry);
+				list_del(&next->fl_entry);
+				kfree(next);
+			}
+		}
+
+		if (splitoff) {
+			if (!region_split(rmm, this, splitoff))
+				return -ENOMEM;
+		}
+
+		this = region_split(rmm, this, alloc);
+		if (this == NULL)
+			return -ENOMEM;
+
+		this->free = false;
+		list_del(&this->fl_entry);
+		*pnode = this;
+		return 0;
+	}
+
+	return -ENOMEM;
+}
+
+int
+nouveau_mm_init(struct nouveau_mm **prmm, u32 offset, u32 length, u32 block)
+{
+	struct nouveau_mm *rmm;
+	struct nouveau_mm_node *heap;
+
+	heap = kzalloc(sizeof(*heap), GFP_KERNEL);
+	if (!heap)
+		return -ENOMEM;
+	heap->free = true;
+	heap->offset = roundup(offset, block);
+	heap->length = rounddown(offset + length, block) - heap->offset;
+
+	rmm = kzalloc(sizeof(*rmm), GFP_KERNEL);
+	if (!rmm) {
+		kfree(heap);
+		return -ENOMEM;
+	}
+	rmm->block_size = block;
+	mutex_init(&rmm->mutex);
+	INIT_LIST_HEAD(&rmm->nodes);
+	INIT_LIST_HEAD(&rmm->free);
+	list_add(&heap->nl_entry, &rmm->nodes);
+	list_add(&heap->fl_entry, &rmm->free);
+
+	*prmm = rmm;
+	return 0;
+}
+
+int
+nouveau_mm_fini(struct nouveau_mm **prmm)
+{
+	struct nouveau_mm *rmm = *prmm;
+	struct nouveau_mm_node *heap =
+		list_first_entry(&rmm->nodes, struct nouveau_mm_node, nl_entry);
+
+	if (!list_is_singular(&rmm->nodes))
+		return -EBUSY;
+
+	kfree(heap);
+	kfree(rmm);
+	*prmm = NULL;
+	return 0;
+}
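
/*
 * A standalone model of the allocator's "typed block" rule seen above:
 * a request of type T may only be served from a free node that is either
 * untyped (type 0) or already of type T; serving an untyped node stamps
 * it with the requested type.  Splitting, merging and alignment are
 * omitted, and the names are illustrative, not nouveau_mm's.
 */
#include <stdio.h>

struct node {
	int type;        /* 0 == untyped, otherwise a memory type */
	unsigned length;
	int free;
};

static struct node *mm_get(struct node *nodes, int n, int type, unsigned size)
{
	int i;

	for (i = 0; i < n; i++) {
		struct node *this = &nodes[i];

		if (!this->free || this->length < size)
			continue;
		if (this->type && this->type != type)
			continue; /* skip wrongly typed blocks */

		this->type = type; /* an untyped block takes on the type */
		this->free = 0;
		return this;
	}
	return NULL;
}

int main(void)
{
	struct node pool[2] = {
		{ 2, 8, 1 }, /* free, but typed for someone else */
		{ 0, 8, 1 }, /* free and untyped */
	};
	struct node *got = mm_get(pool, 2, 1, 4);

	printf("got node %ld, type %d\n", (long)(got - pool), got->type);
	/* prints "got node 1, type 1": the typed node was skipped */
	return 0;
}
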
diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.h b/drivers/gpu/drm/nouveau/nouveau_mm.h
new file mode 100644
index 0000000..250e642
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_mm.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#ifndef __NOUVEAU_REGION_H__
+#define __NOUVEAU_REGION_H__
+
+struct nouveau_mm_node {
+	struct list_head nl_entry;
+	struct list_head fl_entry;
+	struct list_head rl_entry;
+
+	bool free;
+	int  type;
+
+	u32 offset;
+	u32 length;
+};
+
+struct nouveau_mm {
+	struct list_head nodes;
+	struct list_head free;
+
+	struct mutex mutex;
+
+	u32 block_size;
+};
+
+int  nouveau_mm_init(struct nouveau_mm **, u32 offset, u32 length, u32 block);
+int  nouveau_mm_fini(struct nouveau_mm **);
+int  nouveau_mm_pre(struct nouveau_mm *);
+int  nouveau_mm_get(struct nouveau_mm *, int type, u32 size, u32 size_nc,
+		    u32 align, struct nouveau_mm_node **);
+void nouveau_mm_put(struct nouveau_mm *, struct nouveau_mm_node *);
+
+int  nv50_vram_init(struct drm_device *);
+int  nv50_vram_new(struct drm_device *, u64 size, u32 align, u32 size_nc,
+		    u32 memtype, struct nouveau_vram **);
+void nv50_vram_del(struct drm_device *, struct nouveau_vram **);
+bool nv50_vram_flags_valid(struct drm_device *, u32 tile_flags);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
index 2cc59f8..a050b7b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_notifier.c
+++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c
@@ -99,7 +99,6 @@
 		       int size, uint32_t *b_offset)
 {
 	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_gpuobj *nobj = NULL;
 	struct drm_mm_node *mem;
 	uint32_t offset;
@@ -113,31 +112,15 @@
 		return -ENOMEM;
 	}
 
-	offset = chan->notifier_bo->bo.mem.start << PAGE_SHIFT;
-	if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_VRAM) {
-		target = NV_DMA_TARGET_VIDMEM;
-	} else
-	if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_TT) {
-		if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA &&
-		    dev_priv->card_type < NV_50) {
-			ret = nouveau_sgdma_get_page(dev, offset, &offset);
-			if (ret)
-				return ret;
-			target = NV_DMA_TARGET_PCI;
-		} else {
-			target = NV_DMA_TARGET_AGP;
-			if (dev_priv->card_type >= NV_50)
-				offset += dev_priv->vm_gart_base;
-		}
-	} else {
-		NV_ERROR(dev, "Bad DMA target, mem_type %d!\n",
-			 chan->notifier_bo->bo.mem.mem_type);
-		return -EINVAL;
-	}
+	if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_VRAM)
+		target = NV_MEM_TARGET_VRAM;
+	else
+		target = NV_MEM_TARGET_GART;
+	offset  = chan->notifier_bo->bo.mem.start << PAGE_SHIFT;
 	offset += mem->start;
 
 	ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, offset,
-				     mem->size, NV_DMA_ACCESS_RW, target,
+				     mem->size, NV_MEM_ACCESS_RW, target,
 				     &nobj);
 	if (ret) {
 		drm_mm_put_block(mem);
@@ -185,11 +168,11 @@
 	struct nouveau_channel *chan;
 	int ret;
 
-	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(na->channel, file_priv, chan);
+	chan = nouveau_channel_get(dev, file_priv, na->channel);
+	if (IS_ERR(chan))
+		return PTR_ERR(chan);
 
 	ret = nouveau_notifier_alloc(chan, na->handle, na->size, &na->offset);
-	if (ret)
-		return ret;
-
-	return 0;
+	nouveau_channel_put(&chan);
+	return ret;
 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
index dd572ad..55c9fdc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_object.c
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -35,6 +35,102 @@
 #include "nouveau_drv.h"
 #include "nouveau_drm.h"
 #include "nouveau_ramht.h"
+#include "nouveau_vm.h"
+
+struct nouveau_gpuobj_method {
+	struct list_head head;
+	u32 mthd;
+	int (*exec)(struct nouveau_channel *, u32 class, u32 mthd, u32 data);
+};
+
+struct nouveau_gpuobj_class {
+	struct list_head head;
+	struct list_head methods;
+	u32 id;
+	u32 engine;
+};
+
+int
+nouveau_gpuobj_class_new(struct drm_device *dev, u32 class, u32 engine)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_gpuobj_class *oc;
+
+	oc = kzalloc(sizeof(*oc), GFP_KERNEL);
+	if (!oc)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&oc->methods);
+	oc->id = class;
+	oc->engine = engine;
+	list_add(&oc->head, &dev_priv->classes);
+	return 0;
+}
+
+int
+nouveau_gpuobj_mthd_new(struct drm_device *dev, u32 class, u32 mthd,
+			int (*exec)(struct nouveau_channel *, u32, u32, u32))
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_gpuobj_method *om;
+	struct nouveau_gpuobj_class *oc;
+
+	list_for_each_entry(oc, &dev_priv->classes, head) {
+		if (oc->id == class)
+			goto found;
+	}
+
+	return -EINVAL;
+
+found:
+	om = kzalloc(sizeof(*om), GFP_KERNEL);
+	if (!om)
+		return -ENOMEM;
+
+	om->mthd = mthd;
+	om->exec = exec;
+	list_add(&om->head, &oc->methods);
+	return 0;
+}
+
+int
+nouveau_gpuobj_mthd_call(struct nouveau_channel *chan,
+			 u32 class, u32 mthd, u32 data)
+{
+	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+	struct nouveau_gpuobj_method *om;
+	struct nouveau_gpuobj_class *oc;
+
+	list_for_each_entry(oc, &dev_priv->classes, head) {
+		if (oc->id != class)
+			continue;
+
+		list_for_each_entry(om, &oc->methods, head) {
+			if (om->mthd == mthd)
+				return om->exec(chan, class, mthd, data);
+		}
+	}
+
+	return -ENOENT;
+}
+
+int
+nouveau_gpuobj_mthd_call2(struct drm_device *dev, int chid,
+			  u32 class, u32 mthd, u32 data)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_channel *chan = NULL;
+	unsigned long flags;
+	int ret = -EINVAL;
+
+	spin_lock_irqsave(&dev_priv->channels.lock, flags);
+	if (chid > 0 && chid < dev_priv->engine.fifo.channels)
+		chan = dev_priv->channels.ptr[chid];
+	if (chan)
+		ret = nouveau_gpuobj_mthd_call(chan, class, mthd, data);
+	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
+	return ret;
+}
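
To illustrate the new class/method tables: an engine registers the
classes it implements once at init time, and software classes can hang
per-method handlers off them for nouveau_gpuobj_mthd_call() to
dispatch. A sketch with made-up class/method ids:

static int example_mthd(struct nouveau_channel *chan, u32 class,
			u32 mthd, u32 data)
{
	/* act on the method; non-zero propagates as a dispatch error */
	return 0;
}

static int example_register_classes(struct drm_device *dev)
{
	int ret;

	/* 0x506e and 0x0500 are illustrative values only */
	ret = nouveau_gpuobj_class_new(dev, 0x506e, NVOBJ_ENGINE_SW);
	if (ret)
		return ret;

	return nouveau_gpuobj_mthd_new(dev, 0x506e, 0x0500, example_mthd);
}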
 
 /* NVidia uses context objects to drive drawing operations.
 
@@ -73,17 +169,14 @@
 		   struct nouveau_gpuobj **gpuobj_ret)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_engine *engine = &dev_priv->engine;
+	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
 	struct nouveau_gpuobj *gpuobj;
 	struct drm_mm_node *ramin = NULL;
-	int ret;
+	int ret, i;
 
 	NV_DEBUG(dev, "ch%d size=%u align=%d flags=0x%08x\n",
 		 chan ? chan->id : -1, size, align, flags);
 
-	if (!dev_priv || !gpuobj_ret || *gpuobj_ret != NULL)
-		return -EINVAL;
-
 	gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
 	if (!gpuobj)
 		return -ENOMEM;
@@ -98,88 +191,41 @@
 	spin_unlock(&dev_priv->ramin_lock);
 
 	if (chan) {
-		NV_DEBUG(dev, "channel heap\n");
-
 		ramin = drm_mm_search_free(&chan->ramin_heap, size, align, 0);
 		if (ramin)
 			ramin = drm_mm_get_block(ramin, size, align);
-
 		if (!ramin) {
 			nouveau_gpuobj_ref(NULL, &gpuobj);
 			return -ENOMEM;
 		}
-	} else {
-		NV_DEBUG(dev, "global heap\n");
 
-		/* allocate backing pages, sets vinst */
-		ret = engine->instmem.populate(dev, gpuobj, &size);
-		if (ret) {
-			nouveau_gpuobj_ref(NULL, &gpuobj);
-			return ret;
-		}
-
-		/* try and get aperture space */
-		do {
-			if (drm_mm_pre_get(&dev_priv->ramin_heap))
-				return -ENOMEM;
-
-			spin_lock(&dev_priv->ramin_lock);
-			ramin = drm_mm_search_free(&dev_priv->ramin_heap, size,
-						   align, 0);
-			if (ramin == NULL) {
-				spin_unlock(&dev_priv->ramin_lock);
-				nouveau_gpuobj_ref(NULL, &gpuobj);
-				return -ENOMEM;
-			}
-
-			ramin = drm_mm_get_block_atomic(ramin, size, align);
-			spin_unlock(&dev_priv->ramin_lock);
-		} while (ramin == NULL);
-
-		/* on nv50 it's ok to fail, we have a fallback path */
-		if (!ramin && dev_priv->card_type < NV_50) {
-			nouveau_gpuobj_ref(NULL, &gpuobj);
-			return -ENOMEM;
-		}
-	}
-
-	/* if we got a chunk of the aperture, map pages into it */
-	gpuobj->im_pramin = ramin;
-	if (!chan && gpuobj->im_pramin && dev_priv->ramin_available) {
-		ret = engine->instmem.bind(dev, gpuobj);
-		if (ret) {
-			nouveau_gpuobj_ref(NULL, &gpuobj);
-			return ret;
-		}
-	}
-
-	/* calculate the various different addresses for the object */
-	if (chan) {
 		gpuobj->pinst = chan->ramin->pinst;
 		if (gpuobj->pinst != ~0)
-			gpuobj->pinst += gpuobj->im_pramin->start;
+			gpuobj->pinst += ramin->start;
 
-		if (dev_priv->card_type < NV_50) {
-			gpuobj->cinst = gpuobj->pinst;
-		} else {
-			gpuobj->cinst = gpuobj->im_pramin->start;
-			gpuobj->vinst = gpuobj->im_pramin->start +
-					chan->ramin->vinst;
-		}
+		gpuobj->cinst = ramin->start;
+		gpuobj->vinst = ramin->start + chan->ramin->vinst;
+		gpuobj->node  = ramin;
 	} else {
-		if (gpuobj->im_pramin)
-			gpuobj->pinst = gpuobj->im_pramin->start;
-		else
+		ret = instmem->get(gpuobj, size, align);
+		if (ret) {
+			nouveau_gpuobj_ref(NULL, &gpuobj);
+			return ret;
+		}
+
+		ret = -ENOSYS;
+		if (!(flags & NVOBJ_FLAG_DONT_MAP))
+			ret = instmem->map(gpuobj);
+		if (ret)
 			gpuobj->pinst = ~0;
-		gpuobj->cinst = 0xdeadbeef;
+
+		gpuobj->cinst = NVOBJ_CINST_GLOBAL;
 	}
 
 	if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
-		int i;
-
 		for (i = 0; i < gpuobj->size; i += 4)
 			nv_wo32(gpuobj, i, 0);
-		engine->instmem.flush(dev);
+		instmem->flush(dev);
 	}
 
 
@@ -195,6 +241,7 @@
 	NV_DEBUG(dev, "\n");
 
 	INIT_LIST_HEAD(&dev_priv->gpuobj_list);
+	INIT_LIST_HEAD(&dev_priv->classes);
 	spin_lock_init(&dev_priv->ramin_lock);
 	dev_priv->ramin_base = ~0;
 
@@ -205,9 +252,20 @@
 nouveau_gpuobj_takedown(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_gpuobj_method *om, *tm;
+	struct nouveau_gpuobj_class *oc, *tc;
 
 	NV_DEBUG(dev, "\n");
 
+	list_for_each_entry_safe(oc, tc, &dev_priv->classes, head) {
+		list_for_each_entry_safe(om, tm, &oc->methods, head) {
+			list_del(&om->head);
+			kfree(om);
+		}
+		list_del(&oc->head);
+		kfree(oc);
+	}
+
 	BUG_ON(!list_empty(&dev_priv->gpuobj_list));
 }
 
@@ -219,26 +277,34 @@
 		container_of(ref, struct nouveau_gpuobj, refcount);
 	struct drm_device *dev = gpuobj->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_engine *engine = &dev_priv->engine;
+	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
 	int i;
 
 	NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
 
-	if (gpuobj->im_pramin && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) {
+	if (gpuobj->node && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) {
 		for (i = 0; i < gpuobj->size; i += 4)
 			nv_wo32(gpuobj, i, 0);
-		engine->instmem.flush(dev);
+		instmem->flush(dev);
 	}
 
 	if (gpuobj->dtor)
 		gpuobj->dtor(dev, gpuobj);
 
-	if (gpuobj->im_backing)
-		engine->instmem.clear(dev, gpuobj);
+	if (gpuobj->cinst == NVOBJ_CINST_GLOBAL) {
+		if (gpuobj->node) {
+			instmem->unmap(gpuobj);
+			instmem->put(gpuobj);
+		}
+	} else {
+		if (gpuobj->node) {
+			spin_lock(&dev_priv->ramin_lock);
+			drm_mm_put_block(gpuobj->node);
+			spin_unlock(&dev_priv->ramin_lock);
+		}
+	}
 
 	spin_lock(&dev_priv->ramin_lock);
-	if (gpuobj->im_pramin)
-		drm_mm_put_block(gpuobj->im_pramin);
 	list_del(&gpuobj->list);
 	spin_unlock(&dev_priv->ramin_lock);
 
@@ -278,7 +344,7 @@
 	kref_init(&gpuobj->refcount);
 	gpuobj->size  = size;
 	gpuobj->pinst = pinst;
-	gpuobj->cinst = 0xdeadbeef;
+	gpuobj->cinst = NVOBJ_CINST_GLOBAL;
 	gpuobj->vinst = vinst;
 
 	if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
@@ -335,115 +401,152 @@
    The method below creates a DMA object in instance RAM and returns a handle
    to it that can be used to set up context objects.
 */
-int
-nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class,
-		       uint64_t offset, uint64_t size, int access,
-		       int target, struct nouveau_gpuobj **gpuobj)
-{
-	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
-	int ret;
 
-	NV_DEBUG(dev, "ch%d class=0x%04x offset=0x%llx size=0x%llx\n",
-		 chan->id, class, offset, size);
-	NV_DEBUG(dev, "access=%d target=%d\n", access, target);
+void
+nv50_gpuobj_dma_init(struct nouveau_gpuobj *obj, u32 offset, int class,
+		     u64 base, u64 size, int target, int access,
+		     u32 type, u32 comp)
+{
+	struct drm_nouveau_private *dev_priv = obj->dev->dev_private;
+	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
+	u32 flags0;
+
+	flags0  = (comp << 29) | (type << 22) | class;
+	flags0 |= 0x00100000;
+
+	switch (access) {
+	case NV_MEM_ACCESS_RO: flags0 |= 0x00040000; break;
+	case NV_MEM_ACCESS_RW:
+	case NV_MEM_ACCESS_WO: flags0 |= 0x00080000; break;
+	default:
+		break;
+	}
 
 	switch (target) {
-	case NV_DMA_TARGET_AGP:
-		offset += dev_priv->gart_info.aper_base;
+	case NV_MEM_TARGET_VRAM:
+		flags0 |= 0x00010000;
+		break;
+	case NV_MEM_TARGET_PCI:
+		flags0 |= 0x00020000;
+		break;
+	case NV_MEM_TARGET_PCI_NOSNOOP:
+		flags0 |= 0x00030000;
+		break;
+	case NV_MEM_TARGET_GART:
+		base += dev_priv->gart_info.aper_base;
+	default:
+		flags0 &= ~0x00100000;
+		break;
+	}
+
+	/* convert to base + limit */
+	size = (base + size) - 1;
+
+	nv_wo32(obj, offset + 0x00, flags0);
+	nv_wo32(obj, offset + 0x04, lower_32_bits(size));
+	nv_wo32(obj, offset + 0x08, lower_32_bits(base));
+	nv_wo32(obj, offset + 0x0c, upper_32_bits(size) << 24 |
+				    upper_32_bits(base));
+	nv_wo32(obj, offset + 0x10, 0x00000000);
+	nv_wo32(obj, offset + 0x14, 0x00000000);
+
+	pinstmem->flush(obj->dev);
+}
+
+int
+nv50_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base, u64 size,
+		    int target, int access, u32 type, u32 comp,
+		    struct nouveau_gpuobj **pobj)
+{
+	struct drm_device *dev = chan->dev;
+	int ret;
+
+	ret = nouveau_gpuobj_new(dev, chan, 24, 16, NVOBJ_FLAG_ZERO_FREE, pobj);
+	if (ret)
+		return ret;
+
+	nv50_gpuobj_dma_init(*pobj, 0, class, base, size, target,
+			     access, type, comp);
+	return 0;
+}
+
+int
+nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base,
+		       u64 size, int access, int target,
+		       struct nouveau_gpuobj **pobj)
+{
+	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+	struct drm_device *dev = chan->dev;
+	struct nouveau_gpuobj *obj;
+	u32 flags0, flags2;
+	int ret;
+
+	if (dev_priv->card_type >= NV_50) {
+		u32 comp = (target == NV_MEM_TARGET_VM) ? NV_MEM_COMP_VM : 0;
+		u32 type = (target == NV_MEM_TARGET_VM) ? NV_MEM_TYPE_VM : 0;
+
+		return nv50_gpuobj_dma_new(chan, class, base, size,
+					   target, access, type, comp, pobj);
+	}
+
+	if (target == NV_MEM_TARGET_GART) {
+		if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
+			target = NV_MEM_TARGET_PCI_NOSNOOP;
+			base  += dev_priv->gart_info.aper_base;
+		} else
+		if (base != 0) {
+			base = nouveau_sgdma_get_physical(dev, base);
+			target = NV_MEM_TARGET_PCI;
+		} else {
+			nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma, pobj);
+			return 0;
+		}
+	}
+
+	flags0  = class;
+	flags0 |= 0x00003000; /* PT present, PT linear */
+	flags2  = 0;
+
+	switch (target) {
+	case NV_MEM_TARGET_PCI:
+		flags0 |= 0x00020000;
+		break;
+	case NV_MEM_TARGET_PCI_NOSNOOP:
+		flags0 |= 0x00030000;
 		break;
 	default:
 		break;
 	}
 
-	ret = nouveau_gpuobj_new(dev, chan,
-				 nouveau_gpuobj_class_instmem_size(dev, class),
-				 16, NVOBJ_FLAG_ZERO_ALLOC |
-				 NVOBJ_FLAG_ZERO_FREE, gpuobj);
-	if (ret) {
-		NV_ERROR(dev, "Error creating gpuobj: %d\n", ret);
+	switch (access) {
+	case NV_MEM_ACCESS_RO:
+		flags0 |= 0x00004000;
+		break;
+	case NV_MEM_ACCESS_WO:
+		flags0 |= 0x00008000;
+	default:
+		flags2 |= 0x00000002;
+		break;
+	}
+
+	flags0 |= (base & 0x00000fff) << 20;
+	flags2 |= (base & 0xfffff000);
+
+	ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
+	if (ret)
 		return ret;
-	}
 
-	if (dev_priv->card_type < NV_50) {
-		uint32_t frame, adjust, pte_flags = 0;
+	nv_wo32(obj, 0x00, flags0);
+	nv_wo32(obj, 0x04, size - 1);
+	nv_wo32(obj, 0x08, flags2);
+	nv_wo32(obj, 0x0c, flags2);
 
-		if (access != NV_DMA_ACCESS_RO)
-			pte_flags |= (1<<1);
-		adjust = offset &  0x00000fff;
-		frame  = offset & ~0x00000fff;
-
-		nv_wo32(*gpuobj,  0, ((1<<12) | (1<<13) | (adjust << 20) |
-				      (access << 14) | (target << 16) |
-				      class));
-		nv_wo32(*gpuobj,  4, size - 1);
-		nv_wo32(*gpuobj,  8, frame | pte_flags);
-		nv_wo32(*gpuobj, 12, frame | pte_flags);
-	} else {
-		uint64_t limit = offset + size - 1;
-		uint32_t flags0, flags5;
-
-		if (target == NV_DMA_TARGET_VIDMEM) {
-			flags0 = 0x00190000;
-			flags5 = 0x00010000;
-		} else {
-			flags0 = 0x7fc00000;
-			flags5 = 0x00080000;
-		}
-
-		nv_wo32(*gpuobj,  0, flags0 | class);
-		nv_wo32(*gpuobj,  4, lower_32_bits(limit));
-		nv_wo32(*gpuobj,  8, lower_32_bits(offset));
-		nv_wo32(*gpuobj, 12, ((upper_32_bits(limit) & 0xff) << 24) |
-				      (upper_32_bits(offset) & 0xff));
-		nv_wo32(*gpuobj, 20, flags5);
-	}
-
-	instmem->flush(dev);
-
-	(*gpuobj)->engine = NVOBJ_ENGINE_SW;
-	(*gpuobj)->class  = class;
+	obj->engine = NVOBJ_ENGINE_SW;
+	obj->class  = class;
+	*pobj = obj;
 	return 0;
 }
 
-int
-nouveau_gpuobj_gart_dma_new(struct nouveau_channel *chan,
-			    uint64_t offset, uint64_t size, int access,
-			    struct nouveau_gpuobj **gpuobj,
-			    uint32_t *o_ret)
-{
-	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	int ret;
-
-	if (dev_priv->gart_info.type == NOUVEAU_GART_AGP ||
-	    (dev_priv->card_type >= NV_50 &&
-	     dev_priv->gart_info.type == NOUVEAU_GART_SGDMA)) {
-		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
-					     offset + dev_priv->vm_gart_base,
-					     size, access, NV_DMA_TARGET_AGP,
-					     gpuobj);
-		if (o_ret)
-			*o_ret = 0;
-	} else
-	if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA) {
-		nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma, gpuobj);
-		if (offset & ~0xffffffffULL) {
-			NV_ERROR(dev, "obj offset exceeds 32-bits\n");
-			return -EINVAL;
-		}
-		if (o_ret)
-			*o_ret = (uint32_t)offset;
-		ret = (*gpuobj != NULL) ? 0 : -EINVAL;
-	} else {
-		NV_ERROR(dev, "Invalid GART type %d\n", dev_priv->gart_info.type);
-		return -EINVAL;
-	}
-
-	return ret;
-}
-
 /* Context objects in the instance RAM have the following structure.
  * On NV40 they are 32 bytes long, on NV30 and smaller, 16 bytes.
 
@@ -495,68 +598,13 @@
    entry[5]:
    set to 0?
 */
-int
-nouveau_gpuobj_gr_new(struct nouveau_channel *chan, int class,
-		      struct nouveau_gpuobj **gpuobj)
-{
-	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	int ret;
-
-	NV_DEBUG(dev, "ch%d class=0x%04x\n", chan->id, class);
-
-	ret = nouveau_gpuobj_new(dev, chan,
-				 nouveau_gpuobj_class_instmem_size(dev, class),
-				 16,
-				 NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE,
-				 gpuobj);
-	if (ret) {
-		NV_ERROR(dev, "Error creating gpuobj: %d\n", ret);
-		return ret;
-	}
-
-	if (dev_priv->card_type >= NV_50) {
-		nv_wo32(*gpuobj,  0, class);
-		nv_wo32(*gpuobj, 20, 0x00010000);
-	} else {
-		switch (class) {
-		case NV_CLASS_NULL:
-			nv_wo32(*gpuobj, 0, 0x00001030);
-			nv_wo32(*gpuobj, 4, 0xFFFFFFFF);
-			break;
-		default:
-			if (dev_priv->card_type >= NV_40) {
-				nv_wo32(*gpuobj, 0, class);
-#ifdef __BIG_ENDIAN
-				nv_wo32(*gpuobj, 8, 0x01000000);
-#endif
-			} else {
-#ifdef __BIG_ENDIAN
-				nv_wo32(*gpuobj, 0, class | 0x00080000);
-#else
-				nv_wo32(*gpuobj, 0, class);
-#endif
-			}
-		}
-	}
-	dev_priv->engine.instmem.flush(dev);
-
-	(*gpuobj)->engine = NVOBJ_ENGINE_GR;
-	(*gpuobj)->class  = class;
-	return 0;
-}
-
-int
+static int
 nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class,
 		      struct nouveau_gpuobj **gpuobj_ret)
 {
-	struct drm_nouveau_private *dev_priv;
+	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
 	struct nouveau_gpuobj *gpuobj;
 
-	if (!chan || !gpuobj_ret || *gpuobj_ret != NULL)
-		return -EINVAL;
-	dev_priv = chan->dev->dev_private;
-
 	gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
 	if (!gpuobj)
 		return -ENOMEM;
@@ -573,6 +621,101 @@
 	return 0;
 }
 
+int
+nouveau_gpuobj_gr_new(struct nouveau_channel *chan, u32 handle, int class)
+{
+	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+	struct drm_device *dev = chan->dev;
+	struct nouveau_gpuobj_class *oc;
+	struct nouveau_gpuobj *gpuobj;
+	int ret;
+
+	NV_DEBUG(dev, "ch%d class=0x%04x\n", chan->id, class);
+
+	list_for_each_entry(oc, &dev_priv->classes, head) {
+		if (oc->id == class)
+			goto found;
+	}
+
+	NV_ERROR(dev, "illegal object class: 0x%x\n", class);
+	return -EINVAL;
+
+found:
+	switch (oc->engine) {
+	case NVOBJ_ENGINE_SW:
+		ret = nouveau_gpuobj_sw_new(chan, class, &gpuobj);
+		if (ret)
+			return ret;
+		goto insert;
+	case NVOBJ_ENGINE_GR:
+		if (dev_priv->card_type >= NV_50 && !chan->ramin_grctx) {
+			struct nouveau_pgraph_engine *pgraph =
+				&dev_priv->engine.graph;
+
+			ret = pgraph->create_context(chan);
+			if (ret)
+				return ret;
+		}
+		break;
+	case NVOBJ_ENGINE_CRYPT:
+		if (!chan->crypt_ctx) {
+			struct nouveau_crypt_engine *pcrypt =
+				&dev_priv->engine.crypt;
+
+			ret = pcrypt->create_context(chan);
+			if (ret)
+				return ret;
+		}
+		break;
+	}
+
+	ret = nouveau_gpuobj_new(dev, chan,
+				 nouveau_gpuobj_class_instmem_size(dev, class),
+				 16,
+				 NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE,
+				 &gpuobj);
+	if (ret) {
+		NV_ERROR(dev, "error creating gpuobj: %d\n", ret);
+		return ret;
+	}
+
+	if (dev_priv->card_type >= NV_50) {
+		nv_wo32(gpuobj,  0, class);
+		nv_wo32(gpuobj, 20, 0x00010000);
+	} else {
+		switch (class) {
+		case NV_CLASS_NULL:
+			nv_wo32(gpuobj, 0, 0x00001030);
+			nv_wo32(gpuobj, 4, 0xFFFFFFFF);
+			break;
+		default:
+			if (dev_priv->card_type >= NV_40) {
+				nv_wo32(gpuobj, 0, class);
+#ifdef __BIG_ENDIAN
+				nv_wo32(gpuobj, 8, 0x01000000);
+#endif
+			} else {
+#ifdef __BIG_ENDIAN
+				nv_wo32(gpuobj, 0, class | 0x00080000);
+#else
+				nv_wo32(gpuobj, 0, class);
+#endif
+			}
+		}
+	}
+	dev_priv->engine.instmem.flush(dev);
+
+	gpuobj->engine = oc->engine;
+	gpuobj->class  = oc->id;
+
+insert:
+	ret = nouveau_ramht_insert(chan, handle, gpuobj);
+	if (ret)
+		NV_ERROR(dev, "error adding gpuobj to RAMHT: %d\n", ret);
+	nouveau_gpuobj_ref(NULL, &gpuobj);
+	return ret;
+}
+
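
With RAMHT insertion folded into nouveau_gpuobj_gr_new(), binding a
class to a channel becomes a single call; a sketch with example values:

static int example_bind_class(struct nouveau_channel *chan)
{
	/* creates the object (plus any engine context the channel still
	 * lacks) and hashes it into RAMHT under the handle; the handle
	 * and class numbers here are examples only */
	return nouveau_gpuobj_gr_new(chan, 0xbeef5039, 0x5039);
}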
 static int
 nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
 {
@@ -585,7 +728,7 @@
 	NV_DEBUG(dev, "ch%d\n", chan->id);
 
 	/* Base amount for object storage (8KiB enough?) */
-	size = 0x1000;
+	size = 0x2000;
 	base = 0;
 
 	/* PGRAPH context */
@@ -624,9 +767,8 @@
 {
 	struct drm_device *dev = chan->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
 	struct nouveau_gpuobj *vram = NULL, *tt = NULL;
-	int ret, i;
+	int ret;
 
 	NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);
 
@@ -637,16 +779,14 @@
 		return ret;
 	}
 
-	/* NV50 VM
+	/* NV50/NVC0 VM
 	 *  - Allocate per-channel page-directory
-	 *  - Map GART and VRAM into the channel's address space at the
-	 *    locations determined during init.
+	 *  - Link with shared channel VM
 	 */
-	if (dev_priv->card_type >= NV_50) {
+	if (dev_priv->chan_vm) {
 		u32 pgd_offs = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200;
 		u64 vm_vinst = chan->ramin->vinst + pgd_offs;
 		u32 vm_pinst = chan->ramin->pinst;
-		u32 pde;
 
 		if (vm_pinst != ~0)
 			vm_pinst += pgd_offs;
@@ -655,29 +795,8 @@
 					      0, &chan->vm_pd);
 		if (ret)
 			return ret;
-		for (i = 0; i < 0x4000; i += 8) {
-			nv_wo32(chan->vm_pd, i + 0, 0x00000000);
-			nv_wo32(chan->vm_pd, i + 4, 0xdeadcafe);
-		}
 
-		nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma,
-				   &chan->vm_gart_pt);
-		pde = (dev_priv->vm_gart_base / (512*1024*1024)) * 8;
-		nv_wo32(chan->vm_pd, pde + 0, chan->vm_gart_pt->vinst | 3);
-		nv_wo32(chan->vm_pd, pde + 4, 0x00000000);
-
-		pde = (dev_priv->vm_vram_base / (512*1024*1024)) * 8;
-		for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) {
-			nouveau_gpuobj_ref(dev_priv->vm_vram_pt[i],
-					   &chan->vm_vram_pt[i]);
-
-			nv_wo32(chan->vm_pd, pde + 0,
-				chan->vm_vram_pt[i]->vinst | 0x61);
-			nv_wo32(chan->vm_pd, pde + 4, 0x00000000);
-			pde += 8;
-		}
-
-		instmem->flush(dev);
+		nouveau_vm_ref(dev_priv->chan_vm, &chan->vm, chan->vm_pd);
 	}
 
 	/* RAMHT */
@@ -700,9 +819,8 @@
 	/* VRAM ctxdma */
 	if (dev_priv->card_type >= NV_50) {
 		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
-					     0, dev_priv->vm_end,
-					     NV_DMA_ACCESS_RW,
-					     NV_DMA_TARGET_AGP, &vram);
+					     0, (1ULL << 40), NV_MEM_ACCESS_RW,
+					     NV_MEM_TARGET_VM, &vram);
 		if (ret) {
 			NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
 			return ret;
@@ -710,8 +828,8 @@
 	} else {
 		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
 					     0, dev_priv->fb_available_size,
-					     NV_DMA_ACCESS_RW,
-					     NV_DMA_TARGET_VIDMEM, &vram);
+					     NV_MEM_ACCESS_RW,
+					     NV_MEM_TARGET_VRAM, &vram);
 		if (ret) {
 			NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
 			return ret;
@@ -728,21 +846,13 @@
 	/* TT memory ctxdma */
 	if (dev_priv->card_type >= NV_50) {
 		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
-					     0, dev_priv->vm_end,
-					     NV_DMA_ACCESS_RW,
-					     NV_DMA_TARGET_AGP, &tt);
-		if (ret) {
-			NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
-			return ret;
-		}
-	} else
-	if (dev_priv->gart_info.type != NOUVEAU_GART_NONE) {
-		ret = nouveau_gpuobj_gart_dma_new(chan, 0,
-						  dev_priv->gart_info.aper_size,
-						  NV_DMA_ACCESS_RW, &tt, NULL);
+					     0, (1ULL << 40), NV_MEM_ACCESS_RW,
+					     NV_MEM_TARGET_VM, &tt);
 	} else {
-		NV_ERROR(dev, "Invalid GART type %d\n", dev_priv->gart_info.type);
-		ret = -EINVAL;
+		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
+					     0, dev_priv->gart_info.aper_size,
+					     NV_MEM_ACCESS_RW,
+					     NV_MEM_TARGET_GART, &tt);
 	}
 
 	if (ret) {
@@ -763,9 +873,7 @@
 void
 nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
 {
-	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
 	struct drm_device *dev = chan->dev;
-	int i;
 
 	NV_DEBUG(dev, "ch%d\n", chan->id);
 
@@ -774,10 +882,8 @@
 
 	nouveau_ramht_ref(NULL, &chan->ramht, chan);
 
+	nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
 	nouveau_gpuobj_ref(NULL, &chan->vm_pd);
-	nouveau_gpuobj_ref(NULL, &chan->vm_gart_pt);
-	for (i = 0; i < dev_priv->vm_vram_pt_nr; i++)
-		nouveau_gpuobj_ref(NULL, &chan->vm_vram_pt[i]);
 
 	if (chan->ramin_heap.free_stack.next)
 		drm_mm_takedown(&chan->ramin_heap);
@@ -791,147 +897,91 @@
 	struct nouveau_gpuobj *gpuobj;
 	int i;
 
-	if (dev_priv->card_type < NV_50) {
-		dev_priv->susres.ramin_copy = vmalloc(dev_priv->ramin_rsvd_vram);
-		if (!dev_priv->susres.ramin_copy)
-			return -ENOMEM;
-
-		for (i = 0; i < dev_priv->ramin_rsvd_vram; i += 4)
-			dev_priv->susres.ramin_copy[i/4] = nv_ri32(dev, i);
-		return 0;
-	}
-
 	list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
-		if (!gpuobj->im_backing)
+		if (gpuobj->cinst != NVOBJ_CINST_GLOBAL)
 			continue;
 
-		gpuobj->im_backing_suspend = vmalloc(gpuobj->size);
-		if (!gpuobj->im_backing_suspend) {
+		gpuobj->suspend = vmalloc(gpuobj->size);
+		if (!gpuobj->suspend) {
 			nouveau_gpuobj_resume(dev);
 			return -ENOMEM;
 		}
 
 		for (i = 0; i < gpuobj->size; i += 4)
-			gpuobj->im_backing_suspend[i/4] = nv_ro32(gpuobj, i);
+			gpuobj->suspend[i/4] = nv_ro32(gpuobj, i);
 	}
 
 	return 0;
 }
 
 void
-nouveau_gpuobj_suspend_cleanup(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_gpuobj *gpuobj;
-
-	if (dev_priv->card_type < NV_50) {
-		vfree(dev_priv->susres.ramin_copy);
-		dev_priv->susres.ramin_copy = NULL;
-		return;
-	}
-
-	list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
-		if (!gpuobj->im_backing_suspend)
-			continue;
-
-		vfree(gpuobj->im_backing_suspend);
-		gpuobj->im_backing_suspend = NULL;
-	}
-}
-
-void
 nouveau_gpuobj_resume(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_gpuobj *gpuobj;
 	int i;
 
-	if (dev_priv->card_type < NV_50) {
-		for (i = 0; i < dev_priv->ramin_rsvd_vram; i += 4)
-			nv_wi32(dev, i, dev_priv->susres.ramin_copy[i/4]);
-		nouveau_gpuobj_suspend_cleanup(dev);
-		return;
-	}
-
 	list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
-		if (!gpuobj->im_backing_suspend)
+		if (!gpuobj->suspend)
 			continue;
 
 		for (i = 0; i < gpuobj->size; i += 4)
-			nv_wo32(gpuobj, i, gpuobj->im_backing_suspend[i/4]);
-		dev_priv->engine.instmem.flush(dev);
+			nv_wo32(gpuobj, i, gpuobj->suspend[i/4]);
+
+		vfree(gpuobj->suspend);
+		gpuobj->suspend = NULL;
 	}
 
-	nouveau_gpuobj_suspend_cleanup(dev);
+	dev_priv->engine.instmem.flush(dev);
 }
 
 int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data,
 			      struct drm_file *file_priv)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct drm_nouveau_grobj_alloc *init = data;
-	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
-	struct nouveau_pgraph_object_class *grc;
-	struct nouveau_gpuobj *gr = NULL;
 	struct nouveau_channel *chan;
 	int ret;
 
-	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(init->channel, file_priv, chan);
-
 	if (init->handle == ~0)
 		return -EINVAL;
 
-	grc = pgraph->grclass;
-	while (grc->id) {
-		if (grc->id == init->class)
-			break;
-		grc++;
+	chan = nouveau_channel_get(dev, file_priv, init->channel);
+	if (IS_ERR(chan))
+		return PTR_ERR(chan);
+
+	if (nouveau_ramht_find(chan, init->handle)) {
+		ret = -EEXIST;
+		goto out;
 	}
 
-	if (!grc->id) {
-		NV_ERROR(dev, "Illegal object class: 0x%x\n", init->class);
-		return -EPERM;
-	}
-
-	if (nouveau_ramht_find(chan, init->handle))
-		return -EEXIST;
-
-	if (!grc->software)
-		ret = nouveau_gpuobj_gr_new(chan, grc->id, &gr);
-	else
-		ret = nouveau_gpuobj_sw_new(chan, grc->id, &gr);
+	ret = nouveau_gpuobj_gr_new(chan, init->handle, init->class);
 	if (ret) {
 		NV_ERROR(dev, "Error creating object: %d (%d/0x%08x)\n",
 			 ret, init->channel, init->handle);
-		return ret;
 	}
 
-	ret = nouveau_ramht_insert(chan, init->handle, gr);
-	nouveau_gpuobj_ref(NULL, &gr);
-	if (ret) {
-		NV_ERROR(dev, "Error referencing object: %d (%d/0x%08x)\n",
-			 ret, init->channel, init->handle);
-		return ret;
-	}
-
-	return 0;
+out:
+	nouveau_channel_put(&chan);
+	return ret;
 }
 
 int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data,
 			      struct drm_file *file_priv)
 {
 	struct drm_nouveau_gpuobj_free *objfree = data;
-	struct nouveau_gpuobj *gpuobj;
 	struct nouveau_channel *chan;
+	int ret;
 
-	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(objfree->channel, file_priv, chan);
+	chan = nouveau_channel_get(dev, file_priv, objfree->channel);
+	if (IS_ERR(chan))
+		return PTR_ERR(chan);
 
-	gpuobj = nouveau_ramht_find(chan, objfree->handle);
-	if (!gpuobj)
-		return -ENOENT;
+	/* Synchronize with the user channel */
+	nouveau_channel_idle(chan);
 
-	nouveau_ramht_remove(chan, objfree->handle);
-	return 0;
+	ret = nouveau_ramht_remove(chan, objfree->handle);
+	nouveau_channel_put(&chan);
+	return ret;
 }
 
 u32
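
From userspace these two ioctls are reached through the usual DRM path;
a sketch of the alloc side, assuming the nouveau UAPI layout of this
era (error handling trimmed):

#include <sys/ioctl.h>
#include "nouveau_drm.h"	/* drm_nouveau_grobj_alloc, ioctl numbers */

static int example_grobj_alloc(int fd, int channel)
{
	struct drm_nouveau_grobj_alloc req = {
		.channel = channel,
		.handle  = 0xbeef5039,	/* arbitrary, unique per channel */
		.class   = 0x5039,	/* example class */
	};

	/* errno: EEXIST if the handle is already hashed on the channel,
	 * EINVAL if no engine registered the class */
	return ioctl(fd, DRM_IOCTL_NOUVEAU_GROBJ_ALLOC, &req);
}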
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c
index 9f7b158..d938141 100644
--- a/drivers/gpu/drm/nouveau/nouveau_pm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_pm.c
@@ -27,6 +27,10 @@
 #include "nouveau_drv.h"
 #include "nouveau_pm.h"
 
+#ifdef CONFIG_ACPI
+#include <linux/acpi.h>
+#endif
+#include <linux/power_supply.h>
 #include <linux/hwmon.h>
 #include <linux/hwmon-sysfs.h>
 
@@ -446,6 +450,25 @@
 #endif
 }
 
+#ifdef CONFIG_ACPI
+static int
+nouveau_pm_acpi_event(struct notifier_block *nb, unsigned long val, void *data)
+{
+	struct drm_nouveau_private *dev_priv =
+		container_of(nb, struct drm_nouveau_private, engine.pm.acpi_nb);
+	struct drm_device *dev = dev_priv->dev;
+	struct acpi_bus_event *entry = (struct acpi_bus_event *)data;
+
+	if (strcmp(entry->device_class, "ac_adapter") == 0) {
+		bool ac = power_supply_is_system_supplied();
+
+		NV_DEBUG(dev, "power supply changed: %s\n", ac ? "AC" : "DC");
+	}
+
+	return NOTIFY_OK;
+}
+#endif
+
 int
 nouveau_pm_init(struct drm_device *dev)
 {
@@ -485,6 +508,10 @@
 
 	nouveau_sysfs_init(dev);
 	nouveau_hwmon_init(dev);
+#ifdef CONFIG_ACPI
+	pm->acpi_nb.notifier_call = nouveau_pm_acpi_event;
+	register_acpi_notifier(&pm->acpi_nb);
+#endif
 
 	return 0;
 }
@@ -503,6 +530,9 @@
 	nouveau_perf_fini(dev);
 	nouveau_volt_fini(dev);
 
+#ifdef CONFIG_ACPI
+	unregister_acpi_notifier(&pm->acpi_nb);
+#endif
 	nouveau_hwmon_fini(dev);
 	nouveau_sysfs_fini(dev);
 }
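
The notifier above only logs the AC/DC transition for now; a sketch of
what a consumer might eventually do with it (set_profile() and the
profile names are hypothetical, not part of this patch):

static void set_profile(struct drm_device *dev, const char *name);

static void example_on_power_event(struct drm_device *dev)
{
	/* true when the system is running from mains power */
	if (power_supply_is_system_supplied())
		set_profile(dev, "performance");
	else
		set_profile(dev, "powersave");
}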
diff --git a/drivers/gpu/drm/nouveau/nouveau_ramht.c b/drivers/gpu/drm/nouveau/nouveau_ramht.c
index 2d85809..bef3e69 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ramht.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ramht.c
@@ -104,17 +104,17 @@
 	nouveau_gpuobj_ref(gpuobj, &entry->gpuobj);
 
 	if (dev_priv->card_type < NV_40) {
-		ctx = NV_RAMHT_CONTEXT_VALID | (gpuobj->cinst >> 4) |
+		ctx = NV_RAMHT_CONTEXT_VALID | (gpuobj->pinst >> 4) |
 		      (chan->id << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) |
 		      (gpuobj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT);
 	} else
 	if (dev_priv->card_type < NV_50) {
-		ctx = (gpuobj->cinst >> 4) |
+		ctx = (gpuobj->pinst >> 4) |
 		      (chan->id << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) |
 		      (gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT);
 	} else {
 		if (gpuobj->engine == NVOBJ_ENGINE_DISPLAY) {
-			ctx = (gpuobj->cinst << 10) | 2;
+			ctx = (gpuobj->cinst << 10) | chan->id;
 		} else {
 			ctx = (gpuobj->cinst >> 4) |
 			      ((gpuobj->engine <<
@@ -214,18 +214,19 @@
 	spin_unlock_irqrestore(&chan->ramht->lock, flags);
 }
 
-void
+int
 nouveau_ramht_remove(struct nouveau_channel *chan, u32 handle)
 {
 	struct nouveau_ramht_entry *entry;
 
 	entry = nouveau_ramht_remove_entry(chan, handle);
 	if (!entry)
-		return;
+		return -ENOENT;
 
 	nouveau_ramht_remove_hash(chan, entry->handle);
 	nouveau_gpuobj_ref(NULL, &entry->gpuobj);
 	kfree(entry);
+	return 0;
 }
 
 struct nouveau_gpuobj *
diff --git a/drivers/gpu/drm/nouveau/nouveau_ramht.h b/drivers/gpu/drm/nouveau/nouveau_ramht.h
index b79cb5e..c82de98 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ramht.h
+++ b/drivers/gpu/drm/nouveau/nouveau_ramht.h
@@ -48,7 +48,7 @@
 
 extern int  nouveau_ramht_insert(struct nouveau_channel *, u32 handle,
 				 struct nouveau_gpuobj *);
-extern void nouveau_ramht_remove(struct nouveau_channel *, u32 handle);
+extern int  nouveau_ramht_remove(struct nouveau_channel *, u32 handle);
 extern struct nouveau_gpuobj *
 nouveau_ramht_find(struct nouveau_channel *chan, u32 handle);
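
Since nouveau_ramht_remove() now returns an error code, callers can
surface a missing handle instead of ignoring it; minimally:

static int example_detach(struct nouveau_channel *chan, u32 handle)
{
	int ret = nouveau_ramht_remove(chan, handle);
	if (ret)
		return ret;	/* -ENOENT: handle was never hashed here */
	/* entry removed and its gpuobj reference dropped */
	return 0;
}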
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_reg.h b/drivers/gpu/drm/nouveau/nouveau_reg.h
index 1b42541..04e8fb7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_reg.h
+++ b/drivers/gpu/drm/nouveau/nouveau_reg.h
@@ -45,6 +45,11 @@
 #	define NV04_PFB_REF_CMD_REFRESH				(1 << 0)
 #define NV04_PFB_PRE						0x001002d4
 #	define NV04_PFB_PRE_CMD_PRECHARGE			(1 << 0)
+#define NV20_PFB_ZCOMP(i)                              (0x00100300 + 4*(i))
+#	define NV20_PFB_ZCOMP_MODE_32				(4 << 24)
+#	define NV20_PFB_ZCOMP_EN				(1 << 31)
+#	define NV25_PFB_ZCOMP_MODE_16				(1 << 20)
+#	define NV25_PFB_ZCOMP_MODE_32				(2 << 20)
 #define NV10_PFB_CLOSE_PAGE2					0x0010033c
 #define NV04_PFB_SCRAMBLE(i)                         (0x00100400 + 4 * (i))
 #define NV40_PFB_TILE(i)                              (0x00100600 + (i*16))
@@ -74,17 +79,6 @@
 #    define NV40_RAMHT_CONTEXT_ENGINE_SHIFT                20
 #    define NV40_RAMHT_CONTEXT_INSTANCE_SHIFT              0
 
-/* DMA object defines */
-#define NV_DMA_ACCESS_RW 0
-#define NV_DMA_ACCESS_RO 1
-#define NV_DMA_ACCESS_WO 2
-#define NV_DMA_TARGET_VIDMEM 0
-#define NV_DMA_TARGET_PCI    2
-#define NV_DMA_TARGET_AGP    3
-/* The following is not a real value used by the card, it's changed by
- * nouveau_object_dma_create */
-#define NV_DMA_TARGET_PCI_NONLINEAR 8
-
 /* Some object classes we care about in the drm */
 #define NV_CLASS_DMA_FROM_MEMORY                           0x00000002
 #define NV_CLASS_DMA_TO_MEMORY                             0x00000003
@@ -332,6 +326,7 @@
 #define NV04_PGRAPH_BSWIZZLE5                              0x004006A0
 #define NV03_PGRAPH_STATUS                                 0x004006B0
 #define NV04_PGRAPH_STATUS                                 0x00400700
+#    define NV40_PGRAPH_STATUS_SYNC_STALL                  0x00004000
 #define NV04_PGRAPH_TRAPPED_ADDR                           0x00400704
 #define NV04_PGRAPH_TRAPPED_DATA                           0x00400708
 #define NV04_PGRAPH_SURFACE                                0x0040070C
@@ -378,6 +373,7 @@
 #define NV20_PGRAPH_TLIMIT(i)                              (0x00400904 + (i*16))
 #define NV20_PGRAPH_TSIZE(i)                               (0x00400908 + (i*16))
 #define NV20_PGRAPH_TSTATUS(i)                             (0x0040090C + (i*16))
+#define NV20_PGRAPH_ZCOMP(i)                               (0x00400980 + 4*(i))
 #define NV10_PGRAPH_TILE(i)                                (0x00400B00 + (i*16))
 #define NV10_PGRAPH_TLIMIT(i)                              (0x00400B04 + (i*16))
 #define NV10_PGRAPH_TSIZE(i)                               (0x00400B08 + (i*16))
@@ -714,31 +710,32 @@
 #define NV50_PDISPLAY_INTR_1_CLK_UNK10                               0x00000010
 #define NV50_PDISPLAY_INTR_1_CLK_UNK20                               0x00000020
 #define NV50_PDISPLAY_INTR_1_CLK_UNK40                               0x00000040
-#define NV50_PDISPLAY_INTR_EN                                        0x0061002c
-#define NV50_PDISPLAY_INTR_EN_VBLANK_CRTC                            0x0000000c
-#define NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_(n)                   (1 << ((n) + 2))
-#define NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_0                          0x00000004
-#define NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_1                          0x00000008
-#define NV50_PDISPLAY_INTR_EN_CLK_UNK10                              0x00000010
-#define NV50_PDISPLAY_INTR_EN_CLK_UNK20                              0x00000020
-#define NV50_PDISPLAY_INTR_EN_CLK_UNK40                              0x00000040
+#define NV50_PDISPLAY_INTR_EN_0                                      0x00610028
+#define NV50_PDISPLAY_INTR_EN_1                                      0x0061002c
+#define NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC                          0x0000000c
+#define NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_(n)                 (1 << ((n) + 2))
+#define NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_0                        0x00000004
+#define NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_1                        0x00000008
+#define NV50_PDISPLAY_INTR_EN_1_CLK_UNK10                            0x00000010
+#define NV50_PDISPLAY_INTR_EN_1_CLK_UNK20                            0x00000020
+#define NV50_PDISPLAY_INTR_EN_1_CLK_UNK40                            0x00000040
 #define NV50_PDISPLAY_UNK30_CTRL                                     0x00610030
 #define NV50_PDISPLAY_UNK30_CTRL_UPDATE_VCLK0                        0x00000200
 #define NV50_PDISPLAY_UNK30_CTRL_UPDATE_VCLK1                        0x00000400
 #define NV50_PDISPLAY_UNK30_CTRL_PENDING                             0x80000000
-#define NV50_PDISPLAY_TRAPPED_ADDR                                   0x00610080
-#define NV50_PDISPLAY_TRAPPED_DATA                                   0x00610084
-#define NV50_PDISPLAY_CHANNEL_STAT(i)                  ((i) * 0x10 + 0x00610200)
-#define NV50_PDISPLAY_CHANNEL_STAT_DMA                               0x00000010
-#define NV50_PDISPLAY_CHANNEL_STAT_DMA_DISABLED                      0x00000000
-#define NV50_PDISPLAY_CHANNEL_STAT_DMA_ENABLED                       0x00000010
-#define NV50_PDISPLAY_CHANNEL_DMA_CB(i)                ((i) * 0x10 + 0x00610204)
-#define NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION                        0x00000002
-#define NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION_VRAM                   0x00000000
-#define NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION_SYSTEM                 0x00000002
-#define NV50_PDISPLAY_CHANNEL_DMA_CB_VALID                           0x00000001
-#define NV50_PDISPLAY_CHANNEL_UNK2(i)                  ((i) * 0x10 + 0x00610208)
-#define NV50_PDISPLAY_CHANNEL_UNK3(i)                  ((i) * 0x10 + 0x0061020c)
+#define NV50_PDISPLAY_TRAPPED_ADDR(i)                  ((i) * 0x08 + 0x00610080)
+#define NV50_PDISPLAY_TRAPPED_DATA(i)                  ((i) * 0x08 + 0x00610084)
+#define NV50_PDISPLAY_EVO_CTRL(i)                      ((i) * 0x10 + 0x00610200)
+#define NV50_PDISPLAY_EVO_CTRL_DMA                                   0x00000010
+#define NV50_PDISPLAY_EVO_CTRL_DMA_DISABLED                          0x00000000
+#define NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED                           0x00000010
+#define NV50_PDISPLAY_EVO_DMA_CB(i)                    ((i) * 0x10 + 0x00610204)
+#define NV50_PDISPLAY_EVO_DMA_CB_LOCATION                            0x00000002
+#define NV50_PDISPLAY_EVO_DMA_CB_LOCATION_VRAM                       0x00000000
+#define NV50_PDISPLAY_EVO_DMA_CB_LOCATION_SYSTEM                     0x00000002
+#define NV50_PDISPLAY_EVO_DMA_CB_VALID                               0x00000001
+#define NV50_PDISPLAY_EVO_UNK2(i)                      ((i) * 0x10 + 0x00610208)
+#define NV50_PDISPLAY_EVO_HASH_TAG(i)                  ((i) * 0x10 + 0x0061020c)
 
 #define NV50_PDISPLAY_CURSOR                                         0x00610270
 #define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i)           ((i) * 0x10 + 0x00610270)
@@ -746,15 +743,11 @@
 #define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS                     0x00030000
 #define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS_ACTIVE              0x00010000
 
-#define NV50_PDISPLAY_CTRL_STATE                                     0x00610300
-#define NV50_PDISPLAY_CTRL_STATE_PENDING                             0x80000000
-#define NV50_PDISPLAY_CTRL_STATE_METHOD                              0x00001ffc
-#define NV50_PDISPLAY_CTRL_STATE_ENABLE                              0x00000001
-#define NV50_PDISPLAY_CTRL_VAL                                       0x00610304
-#define NV50_PDISPLAY_UNK_380                                        0x00610380
-#define NV50_PDISPLAY_RAM_AMOUNT                                     0x00610384
-#define NV50_PDISPLAY_UNK_388                                        0x00610388
-#define NV50_PDISPLAY_UNK_38C                                        0x0061038c
+#define NV50_PDISPLAY_PIO_CTRL                                       0x00610300
+#define NV50_PDISPLAY_PIO_CTRL_PENDING                               0x80000000
+#define NV50_PDISPLAY_PIO_CTRL_MTHD                                  0x00001ffc
+#define NV50_PDISPLAY_PIO_CTRL_ENABLED                               0x00000001
+#define NV50_PDISPLAY_PIO_DATA                                       0x00610304
 
 #define NV50_PDISPLAY_CRTC_P(i, r)        ((i) * 0x540 + NV50_PDISPLAY_CRTC_##r)
 #define NV50_PDISPLAY_CRTC_C(i, r)    (4 + (i) * 0x540 + NV50_PDISPLAY_CRTC_##r)
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index d4ac970..9a250eb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -14,7 +14,7 @@
 	dma_addr_t *pages;
 	unsigned nr_pages;
 
-	unsigned pte_start;
+	u64 offset;
 	bool bound;
 };
 
@@ -74,18 +74,6 @@
 	}
 }
 
-static inline unsigned
-nouveau_sgdma_pte(struct drm_device *dev, uint64_t offset)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	unsigned pte = (offset >> NV_CTXDMA_PAGE_SHIFT);
-
-	if (dev_priv->card_type < NV_50)
-		return pte + 2;
-
-	return pte << 1;
-}
-
 static int
 nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
 {
@@ -97,32 +85,17 @@
 
 	NV_DEBUG(dev, "pg=0x%lx\n", mem->start);
 
-	pte = nouveau_sgdma_pte(nvbe->dev, mem->start << PAGE_SHIFT);
-	nvbe->pte_start = pte;
+	nvbe->offset = mem->start << PAGE_SHIFT;
+	pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
 	for (i = 0; i < nvbe->nr_pages; i++) {
 		dma_addr_t dma_offset = nvbe->pages[i];
 		uint32_t offset_l = lower_32_bits(dma_offset);
-		uint32_t offset_h = upper_32_bits(dma_offset);
 
-		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) {
-			if (dev_priv->card_type < NV_50) {
-				nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3);
-				pte += 1;
-			} else {
-				nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 0x21);
-				nv_wo32(gpuobj, (pte * 4) + 4, offset_h & 0xff);
-				pte += 2;
-			}
-
+		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) {
+			nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3);
 			dma_offset += NV_CTXDMA_PAGE_SIZE;
 		}
 	}
-	dev_priv->engine.instmem.flush(nvbe->dev);
-
-	if (dev_priv->card_type == NV_50) {
-		dev_priv->engine.fifo.tlb_flush(dev);
-		dev_priv->engine.graph.tlb_flush(dev);
-	}
 
 	nvbe->bound = true;
 	return 0;
@@ -142,28 +115,10 @@
 	if (!nvbe->bound)
 		return 0;
 
-	pte = nvbe->pte_start;
+	pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
 	for (i = 0; i < nvbe->nr_pages; i++) {
-		dma_addr_t dma_offset = dev_priv->gart_info.sg_dummy_bus;
-
-		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) {
-			if (dev_priv->card_type < NV_50) {
-				nv_wo32(gpuobj, (pte * 4) + 0, dma_offset | 3);
-				pte += 1;
-			} else {
-				nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
-				nv_wo32(gpuobj, (pte * 4) + 4, 0x00000000);
-				pte += 2;
-			}
-
-			dma_offset += NV_CTXDMA_PAGE_SIZE;
-		}
-	}
-	dev_priv->engine.instmem.flush(nvbe->dev);
-
-	if (dev_priv->card_type == NV_50) {
-		dev_priv->engine.fifo.tlb_flush(dev);
-		dev_priv->engine.graph.tlb_flush(dev);
+		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++)
+			nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
 	}
 
 	nvbe->bound = false;
@@ -186,6 +141,35 @@
 	}
 }
 
+static int
+nv50_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
+{
+	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
+
+	nvbe->offset = mem->start << PAGE_SHIFT;
+
+	nouveau_vm_map_sg(&dev_priv->gart_info.vma, nvbe->offset,
+			  nvbe->nr_pages << PAGE_SHIFT, nvbe->pages);
+	nvbe->bound = true;
+	return 0;
+}
+
+static int
+nv50_sgdma_unbind(struct ttm_backend *be)
+{
+	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
+
+	if (!nvbe->bound)
+		return 0;
+
+	nouveau_vm_unmap_at(&dev_priv->gart_info.vma, nvbe->offset,
+			    nvbe->nr_pages << PAGE_SHIFT);
+	nvbe->bound = false;
+	return 0;
+}
+
 static struct ttm_backend_func nouveau_sgdma_backend = {
 	.populate		= nouveau_sgdma_populate,
 	.clear			= nouveau_sgdma_clear,
@@ -194,23 +178,30 @@
 	.destroy		= nouveau_sgdma_destroy
 };
 
+static struct ttm_backend_func nv50_sgdma_backend = {
+	.populate		= nouveau_sgdma_populate,
+	.clear			= nouveau_sgdma_clear,
+	.bind			= nv50_sgdma_bind,
+	.unbind			= nv50_sgdma_unbind,
+	.destroy		= nouveau_sgdma_destroy
+};
+
 struct ttm_backend *
 nouveau_sgdma_init_ttm(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_sgdma_be *nvbe;
 
-	if (!dev_priv->gart_info.sg_ctxdma)
-		return NULL;
-
 	nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
 	if (!nvbe)
 		return NULL;
 
 	nvbe->dev = dev;
 
-	nvbe->backend.func	= &nouveau_sgdma_backend;
-
+	if (dev_priv->card_type < NV_50)
+		nvbe->backend.func = &nouveau_sgdma_backend;
+	else
+		nvbe->backend.func = &nv50_sgdma_backend;
 	return &nvbe->backend;
 }
 
@@ -218,7 +209,6 @@
 nouveau_sgdma_init(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct pci_dev *pdev = dev->pdev;
 	struct nouveau_gpuobj *gpuobj = NULL;
 	uint32_t aper_size, obj_size;
 	int i, ret;
@@ -231,68 +221,40 @@
 
 		obj_size  = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 4;
 		obj_size += 8; /* ctxdma header */
-	} else {
-		/* 1 entire VM page table */
-		aper_size = (512 * 1024 * 1024);
-		obj_size  = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 8;
-	}
 
-	ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16,
-				      NVOBJ_FLAG_ZERO_ALLOC |
-				      NVOBJ_FLAG_ZERO_FREE, &gpuobj);
-	if (ret) {
-		NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
-		return ret;
-	}
+		ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16,
+					      NVOBJ_FLAG_ZERO_ALLOC |
+					      NVOBJ_FLAG_ZERO_FREE, &gpuobj);
+		if (ret) {
+			NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
+			return ret;
+		}
 
-	dev_priv->gart_info.sg_dummy_page =
-		alloc_page(GFP_KERNEL|__GFP_DMA32|__GFP_ZERO);
-	if (!dev_priv->gart_info.sg_dummy_page) {
-		nouveau_gpuobj_ref(NULL, &gpuobj);
-		return -ENOMEM;
-	}
-
-	set_bit(PG_locked, &dev_priv->gart_info.sg_dummy_page->flags);
-	dev_priv->gart_info.sg_dummy_bus =
-		pci_map_page(pdev, dev_priv->gart_info.sg_dummy_page, 0,
-			     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-	if (pci_dma_mapping_error(pdev, dev_priv->gart_info.sg_dummy_bus)) {
-		nouveau_gpuobj_ref(NULL, &gpuobj);
-		return -EFAULT;
-	}
-
-	if (dev_priv->card_type < NV_50) {
-		/* special case, allocated from global instmem heap so
-		 * cinst is invalid, we use it on all channels though so
-		 * cinst needs to be valid, set it the same as pinst
-		 */
-		gpuobj->cinst = gpuobj->pinst;
-
-		/* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA do this, and
-		 * confirmed to work on c51.  Perhaps means NV_DMA_TARGET_PCIE
-		 * on those cards? */
 		nv_wo32(gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
 				   (1 << 12) /* PT present */ |
 				   (0 << 13) /* PT *not* linear */ |
-				   (NV_DMA_ACCESS_RW  << 14) |
-				   (NV_DMA_TARGET_PCI << 16));
+				   (0 << 14) /* RW */ |
+				   (2 << 16) /* PCI */);
 		nv_wo32(gpuobj, 4, aper_size - 1);
-		for (i = 2; i < 2 + (aper_size >> 12); i++) {
-			nv_wo32(gpuobj, i * 4,
-				dev_priv->gart_info.sg_dummy_bus | 3);
-		}
-	} else {
-		for (i = 0; i < obj_size; i += 8) {
-			nv_wo32(gpuobj, i + 0, 0x00000000);
-			nv_wo32(gpuobj, i + 4, 0x00000000);
-		}
+		for (i = 2; i < 2 + (aper_size >> 12); i++)
+			nv_wo32(gpuobj, i * 4, 0x00000000);
+
+		dev_priv->gart_info.sg_ctxdma = gpuobj;
+		dev_priv->gart_info.aper_base = 0;
+		dev_priv->gart_info.aper_size = aper_size;
+	} else
+	if (dev_priv->chan_vm) {
+		ret = nouveau_vm_get(dev_priv->chan_vm, 512 * 1024 * 1024,
+				     12, NV_MEM_ACCESS_RW,
+				     &dev_priv->gart_info.vma);
+		if (ret)
+			return ret;
+
+		dev_priv->gart_info.aper_base = dev_priv->gart_info.vma.offset;
+		dev_priv->gart_info.aper_size = 512 * 1024 * 1024;
 	}
-	dev_priv->engine.instmem.flush(dev);
 
 	dev_priv->gart_info.type      = NOUVEAU_GART_SGDMA;
-	dev_priv->gart_info.aper_base = 0;
-	dev_priv->gart_info.aper_size = aper_size;
-	dev_priv->gart_info.sg_ctxdma = gpuobj;
 	return 0;
 }
 
@@ -301,31 +263,19 @@
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 
-	if (dev_priv->gart_info.sg_dummy_page) {
-		pci_unmap_page(dev->pdev, dev_priv->gart_info.sg_dummy_bus,
-			       NV_CTXDMA_PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-		unlock_page(dev_priv->gart_info.sg_dummy_page);
-		__free_page(dev_priv->gart_info.sg_dummy_page);
-		dev_priv->gart_info.sg_dummy_page = NULL;
-		dev_priv->gart_info.sg_dummy_bus = 0;
-	}
-
 	nouveau_gpuobj_ref(NULL, &dev_priv->gart_info.sg_ctxdma);
+	nouveau_vm_put(&dev_priv->gart_info.vma);
 }
 
-int
-nouveau_sgdma_get_page(struct drm_device *dev, uint32_t offset, uint32_t *page)
+uint32_t
+nouveau_sgdma_get_physical(struct drm_device *dev, uint32_t offset)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
-	int pte;
+	int pte = (offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
 
-	pte = (offset >> NV_CTXDMA_PAGE_SHIFT) << 2;
-	if (dev_priv->card_type < NV_50) {
-		*page = nv_ro32(gpuobj, (pte + 8)) & ~NV_CTXDMA_PAGE_MASK;
-		return 0;
-	}
+	BUG_ON(dev_priv->card_type >= NV_50);
 
-	NV_ERROR(dev, "Unimplemented on NV50\n");
-	return -EINVAL;
+	return (nv_ro32(gpuobj, 4 * pte) & ~NV_CTXDMA_PAGE_MASK) |
+		(offset & NV_CTXDMA_PAGE_MASK);
 }
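
A worked example of the lookup above, assuming this file's 4KiB ctxdma
pages (NV_CTXDMA_PAGE_SHIFT of 12, NV_CTXDMA_PAGE_MASK of 0xfff): for
offset 0x12345, pte = (0x12345 >> 12) + 2 = 0x14, so the result is
(PTE[0x14] & ~0xfff) | 0x345.

static uint32_t example_lookup(struct drm_device *dev)
{
	/* resolves GART offset 0x12345 to its bus address via the
	 * pre-NV50 ctxdma PTE array (two header words, then PTEs) */
	return nouveau_sgdma_get_physical(dev, 0x12345);
}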
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index 049f755..8eac943 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -53,10 +53,10 @@
 		engine->instmem.takedown	= nv04_instmem_takedown;
 		engine->instmem.suspend		= nv04_instmem_suspend;
 		engine->instmem.resume		= nv04_instmem_resume;
-		engine->instmem.populate	= nv04_instmem_populate;
-		engine->instmem.clear		= nv04_instmem_clear;
-		engine->instmem.bind		= nv04_instmem_bind;
-		engine->instmem.unbind		= nv04_instmem_unbind;
+		engine->instmem.get		= nv04_instmem_get;
+		engine->instmem.put		= nv04_instmem_put;
+		engine->instmem.map		= nv04_instmem_map;
+		engine->instmem.unmap		= nv04_instmem_unmap;
 		engine->instmem.flush		= nv04_instmem_flush;
 		engine->mc.init			= nv04_mc_init;
 		engine->mc.takedown		= nv04_mc_takedown;
@@ -65,7 +65,6 @@
 		engine->timer.takedown		= nv04_timer_takedown;
 		engine->fb.init			= nv04_fb_init;
 		engine->fb.takedown		= nv04_fb_takedown;
-		engine->graph.grclass		= nv04_graph_grclass;
 		engine->graph.init		= nv04_graph_init;
 		engine->graph.takedown		= nv04_graph_takedown;
 		engine->graph.fifo_access	= nv04_graph_fifo_access;
@@ -76,7 +75,7 @@
 		engine->graph.unload_context	= nv04_graph_unload_context;
 		engine->fifo.channels		= 16;
 		engine->fifo.init		= nv04_fifo_init;
-		engine->fifo.takedown		= nouveau_stub_takedown;
+		engine->fifo.takedown		= nv04_fifo_fini;
 		engine->fifo.disable		= nv04_fifo_disable;
 		engine->fifo.enable		= nv04_fifo_enable;
 		engine->fifo.reassign		= nv04_fifo_reassign;
@@ -99,16 +98,20 @@
 		engine->pm.clock_get		= nv04_pm_clock_get;
 		engine->pm.clock_pre		= nv04_pm_clock_pre;
 		engine->pm.clock_set		= nv04_pm_clock_set;
+		engine->crypt.init		= nouveau_stub_init;
+		engine->crypt.takedown		= nouveau_stub_takedown;
+		engine->vram.init		= nouveau_mem_detect;
+		engine->vram.flags_valid	= nouveau_mem_flags_valid;
 		break;
 	case 0x10:
 		engine->instmem.init		= nv04_instmem_init;
 		engine->instmem.takedown	= nv04_instmem_takedown;
 		engine->instmem.suspend		= nv04_instmem_suspend;
 		engine->instmem.resume		= nv04_instmem_resume;
-		engine->instmem.populate	= nv04_instmem_populate;
-		engine->instmem.clear		= nv04_instmem_clear;
-		engine->instmem.bind		= nv04_instmem_bind;
-		engine->instmem.unbind		= nv04_instmem_unbind;
+		engine->instmem.get		= nv04_instmem_get;
+		engine->instmem.put		= nv04_instmem_put;
+		engine->instmem.map		= nv04_instmem_map;
+		engine->instmem.unmap		= nv04_instmem_unmap;
 		engine->instmem.flush		= nv04_instmem_flush;
 		engine->mc.init			= nv04_mc_init;
 		engine->mc.takedown		= nv04_mc_takedown;
@@ -117,8 +120,9 @@
 		engine->timer.takedown		= nv04_timer_takedown;
 		engine->fb.init			= nv10_fb_init;
 		engine->fb.takedown		= nv10_fb_takedown;
-		engine->fb.set_region_tiling	= nv10_fb_set_region_tiling;
-		engine->graph.grclass		= nv10_graph_grclass;
+		engine->fb.init_tile_region	= nv10_fb_init_tile_region;
+		engine->fb.set_tile_region	= nv10_fb_set_tile_region;
+		engine->fb.free_tile_region	= nv10_fb_free_tile_region;
 		engine->graph.init		= nv10_graph_init;
 		engine->graph.takedown		= nv10_graph_takedown;
 		engine->graph.channel		= nv10_graph_channel;
@@ -127,17 +131,17 @@
 		engine->graph.fifo_access	= nv04_graph_fifo_access;
 		engine->graph.load_context	= nv10_graph_load_context;
 		engine->graph.unload_context	= nv10_graph_unload_context;
-		engine->graph.set_region_tiling	= nv10_graph_set_region_tiling;
+		engine->graph.set_tile_region	= nv10_graph_set_tile_region;
 		engine->fifo.channels		= 32;
 		engine->fifo.init		= nv10_fifo_init;
-		engine->fifo.takedown		= nouveau_stub_takedown;
+		engine->fifo.takedown		= nv04_fifo_fini;
 		engine->fifo.disable		= nv04_fifo_disable;
 		engine->fifo.enable		= nv04_fifo_enable;
 		engine->fifo.reassign		= nv04_fifo_reassign;
 		engine->fifo.cache_pull		= nv04_fifo_cache_pull;
 		engine->fifo.channel_id		= nv10_fifo_channel_id;
 		engine->fifo.create_context	= nv10_fifo_create_context;
-		engine->fifo.destroy_context	= nv10_fifo_destroy_context;
+		engine->fifo.destroy_context	= nv04_fifo_destroy_context;
 		engine->fifo.load_context	= nv10_fifo_load_context;
 		engine->fifo.unload_context	= nv10_fifo_unload_context;
 		engine->display.early_init	= nv04_display_early_init;
@@ -153,16 +157,20 @@
 		engine->pm.clock_get		= nv04_pm_clock_get;
 		engine->pm.clock_pre		= nv04_pm_clock_pre;
 		engine->pm.clock_set		= nv04_pm_clock_set;
+		engine->crypt.init		= nouveau_stub_init;
+		engine->crypt.takedown		= nouveau_stub_takedown;
+		engine->vram.init		= nouveau_mem_detect;
+		engine->vram.flags_valid	= nouveau_mem_flags_valid;
 		break;
 	case 0x20:
 		engine->instmem.init		= nv04_instmem_init;
 		engine->instmem.takedown	= nv04_instmem_takedown;
 		engine->instmem.suspend		= nv04_instmem_suspend;
 		engine->instmem.resume		= nv04_instmem_resume;
-		engine->instmem.populate	= nv04_instmem_populate;
-		engine->instmem.clear		= nv04_instmem_clear;
-		engine->instmem.bind		= nv04_instmem_bind;
-		engine->instmem.unbind		= nv04_instmem_unbind;
+		engine->instmem.get		= nv04_instmem_get;
+		engine->instmem.put		= nv04_instmem_put;
+		engine->instmem.map		= nv04_instmem_map;
+		engine->instmem.unmap		= nv04_instmem_unmap;
 		engine->instmem.flush		= nv04_instmem_flush;
 		engine->mc.init			= nv04_mc_init;
 		engine->mc.takedown		= nv04_mc_takedown;
@@ -171,8 +179,9 @@
 		engine->timer.takedown		= nv04_timer_takedown;
 		engine->fb.init			= nv10_fb_init;
 		engine->fb.takedown		= nv10_fb_takedown;
-		engine->fb.set_region_tiling	= nv10_fb_set_region_tiling;
-		engine->graph.grclass		= nv20_graph_grclass;
+		engine->fb.init_tile_region	= nv10_fb_init_tile_region;
+		engine->fb.set_tile_region	= nv10_fb_set_tile_region;
+		engine->fb.free_tile_region	= nv10_fb_free_tile_region;
 		engine->graph.init		= nv20_graph_init;
 		engine->graph.takedown		= nv20_graph_takedown;
 		engine->graph.channel		= nv10_graph_channel;
@@ -181,17 +190,17 @@
 		engine->graph.fifo_access	= nv04_graph_fifo_access;
 		engine->graph.load_context	= nv20_graph_load_context;
 		engine->graph.unload_context	= nv20_graph_unload_context;
-		engine->graph.set_region_tiling	= nv20_graph_set_region_tiling;
+		engine->graph.set_tile_region	= nv20_graph_set_tile_region;
 		engine->fifo.channels		= 32;
 		engine->fifo.init		= nv10_fifo_init;
-		engine->fifo.takedown		= nouveau_stub_takedown;
+		engine->fifo.takedown		= nv04_fifo_fini;
 		engine->fifo.disable		= nv04_fifo_disable;
 		engine->fifo.enable		= nv04_fifo_enable;
 		engine->fifo.reassign		= nv04_fifo_reassign;
 		engine->fifo.cache_pull		= nv04_fifo_cache_pull;
 		engine->fifo.channel_id		= nv10_fifo_channel_id;
 		engine->fifo.create_context	= nv10_fifo_create_context;
-		engine->fifo.destroy_context	= nv10_fifo_destroy_context;
+		engine->fifo.destroy_context	= nv04_fifo_destroy_context;
 		engine->fifo.load_context	= nv10_fifo_load_context;
 		engine->fifo.unload_context	= nv10_fifo_unload_context;
 		engine->display.early_init	= nv04_display_early_init;
@@ -207,16 +216,20 @@
 		engine->pm.clock_get		= nv04_pm_clock_get;
 		engine->pm.clock_pre		= nv04_pm_clock_pre;
 		engine->pm.clock_set		= nv04_pm_clock_set;
+		engine->crypt.init		= nouveau_stub_init;
+		engine->crypt.takedown		= nouveau_stub_takedown;
+		engine->vram.init		= nouveau_mem_detect;
+		engine->vram.flags_valid	= nouveau_mem_flags_valid;
 		break;
 	case 0x30:
 		engine->instmem.init		= nv04_instmem_init;
 		engine->instmem.takedown	= nv04_instmem_takedown;
 		engine->instmem.suspend		= nv04_instmem_suspend;
 		engine->instmem.resume		= nv04_instmem_resume;
-		engine->instmem.populate	= nv04_instmem_populate;
-		engine->instmem.clear		= nv04_instmem_clear;
-		engine->instmem.bind		= nv04_instmem_bind;
-		engine->instmem.unbind		= nv04_instmem_unbind;
+		engine->instmem.get		= nv04_instmem_get;
+		engine->instmem.put		= nv04_instmem_put;
+		engine->instmem.map		= nv04_instmem_map;
+		engine->instmem.unmap		= nv04_instmem_unmap;
 		engine->instmem.flush		= nv04_instmem_flush;
 		engine->mc.init			= nv04_mc_init;
 		engine->mc.takedown		= nv04_mc_takedown;
@@ -225,8 +238,9 @@
 		engine->timer.takedown		= nv04_timer_takedown;
 		engine->fb.init			= nv30_fb_init;
 		engine->fb.takedown		= nv30_fb_takedown;
-		engine->fb.set_region_tiling	= nv10_fb_set_region_tiling;
-		engine->graph.grclass		= nv30_graph_grclass;
+		engine->fb.init_tile_region	= nv30_fb_init_tile_region;
+		engine->fb.set_tile_region	= nv10_fb_set_tile_region;
+		engine->fb.free_tile_region	= nv30_fb_free_tile_region;
 		engine->graph.init		= nv30_graph_init;
 		engine->graph.takedown		= nv20_graph_takedown;
 		engine->graph.fifo_access	= nv04_graph_fifo_access;
@@ -235,17 +249,17 @@
 		engine->graph.destroy_context	= nv20_graph_destroy_context;
 		engine->graph.load_context	= nv20_graph_load_context;
 		engine->graph.unload_context	= nv20_graph_unload_context;
-		engine->graph.set_region_tiling	= nv20_graph_set_region_tiling;
+		engine->graph.set_tile_region	= nv20_graph_set_tile_region;
 		engine->fifo.channels		= 32;
 		engine->fifo.init		= nv10_fifo_init;
-		engine->fifo.takedown		= nouveau_stub_takedown;
+		engine->fifo.takedown		= nv04_fifo_fini;
 		engine->fifo.disable		= nv04_fifo_disable;
 		engine->fifo.enable		= nv04_fifo_enable;
 		engine->fifo.reassign		= nv04_fifo_reassign;
 		engine->fifo.cache_pull		= nv04_fifo_cache_pull;
 		engine->fifo.channel_id		= nv10_fifo_channel_id;
 		engine->fifo.create_context	= nv10_fifo_create_context;
-		engine->fifo.destroy_context	= nv10_fifo_destroy_context;
+		engine->fifo.destroy_context	= nv04_fifo_destroy_context;
 		engine->fifo.load_context	= nv10_fifo_load_context;
 		engine->fifo.unload_context	= nv10_fifo_unload_context;
 		engine->display.early_init	= nv04_display_early_init;
@@ -263,6 +277,10 @@
 		engine->pm.clock_set		= nv04_pm_clock_set;
 		engine->pm.voltage_get		= nouveau_voltage_gpio_get;
 		engine->pm.voltage_set		= nouveau_voltage_gpio_set;
+		engine->crypt.init		= nouveau_stub_init;
+		engine->crypt.takedown		= nouveau_stub_takedown;
+		engine->vram.init		= nouveau_mem_detect;
+		engine->vram.flags_valid	= nouveau_mem_flags_valid;
 		break;
 	case 0x40:
 	case 0x60:
@@ -270,10 +288,10 @@
 		engine->instmem.takedown	= nv04_instmem_takedown;
 		engine->instmem.suspend		= nv04_instmem_suspend;
 		engine->instmem.resume		= nv04_instmem_resume;
-		engine->instmem.populate	= nv04_instmem_populate;
-		engine->instmem.clear		= nv04_instmem_clear;
-		engine->instmem.bind		= nv04_instmem_bind;
-		engine->instmem.unbind		= nv04_instmem_unbind;
+		engine->instmem.get		= nv04_instmem_get;
+		engine->instmem.put		= nv04_instmem_put;
+		engine->instmem.map		= nv04_instmem_map;
+		engine->instmem.unmap		= nv04_instmem_unmap;
 		engine->instmem.flush		= nv04_instmem_flush;
 		engine->mc.init			= nv40_mc_init;
 		engine->mc.takedown		= nv40_mc_takedown;
@@ -282,8 +300,9 @@
 		engine->timer.takedown		= nv04_timer_takedown;
 		engine->fb.init			= nv40_fb_init;
 		engine->fb.takedown		= nv40_fb_takedown;
-		engine->fb.set_region_tiling	= nv40_fb_set_region_tiling;
-		engine->graph.grclass		= nv40_graph_grclass;
+		engine->fb.init_tile_region	= nv30_fb_init_tile_region;
+		engine->fb.set_tile_region	= nv40_fb_set_tile_region;
+		engine->fb.free_tile_region	= nv30_fb_free_tile_region;
 		engine->graph.init		= nv40_graph_init;
 		engine->graph.takedown		= nv40_graph_takedown;
 		engine->graph.fifo_access	= nv04_graph_fifo_access;
@@ -292,17 +311,17 @@
 		engine->graph.destroy_context	= nv40_graph_destroy_context;
 		engine->graph.load_context	= nv40_graph_load_context;
 		engine->graph.unload_context	= nv40_graph_unload_context;
-		engine->graph.set_region_tiling	= nv40_graph_set_region_tiling;
+		engine->graph.set_tile_region	= nv40_graph_set_tile_region;
 		engine->fifo.channels		= 32;
 		engine->fifo.init		= nv40_fifo_init;
-		engine->fifo.takedown		= nouveau_stub_takedown;
+		engine->fifo.takedown		= nv04_fifo_fini;
 		engine->fifo.disable		= nv04_fifo_disable;
 		engine->fifo.enable		= nv04_fifo_enable;
 		engine->fifo.reassign		= nv04_fifo_reassign;
 		engine->fifo.cache_pull		= nv04_fifo_cache_pull;
 		engine->fifo.channel_id		= nv10_fifo_channel_id;
 		engine->fifo.create_context	= nv40_fifo_create_context;
-		engine->fifo.destroy_context	= nv40_fifo_destroy_context;
+		engine->fifo.destroy_context	= nv04_fifo_destroy_context;
 		engine->fifo.load_context	= nv40_fifo_load_context;
 		engine->fifo.unload_context	= nv40_fifo_unload_context;
 		engine->display.early_init	= nv04_display_early_init;
@@ -321,6 +340,10 @@
 		engine->pm.voltage_get		= nouveau_voltage_gpio_get;
 		engine->pm.voltage_set		= nouveau_voltage_gpio_set;
 		engine->pm.temp_get		= nv40_temp_get;
+		engine->crypt.init		= nouveau_stub_init;
+		engine->crypt.takedown		= nouveau_stub_takedown;
+		engine->vram.init		= nouveau_mem_detect;
+		engine->vram.flags_valid	= nouveau_mem_flags_valid;
 		break;
 	case 0x50:
 	case 0x80: /* gotta love NVIDIA's consistency.. */
@@ -330,10 +353,10 @@
 		engine->instmem.takedown	= nv50_instmem_takedown;
 		engine->instmem.suspend		= nv50_instmem_suspend;
 		engine->instmem.resume		= nv50_instmem_resume;
-		engine->instmem.populate	= nv50_instmem_populate;
-		engine->instmem.clear		= nv50_instmem_clear;
-		engine->instmem.bind		= nv50_instmem_bind;
-		engine->instmem.unbind		= nv50_instmem_unbind;
+		engine->instmem.get		= nv50_instmem_get;
+		engine->instmem.put		= nv50_instmem_put;
+		engine->instmem.map		= nv50_instmem_map;
+		engine->instmem.unmap		= nv50_instmem_unmap;
 		if (dev_priv->chipset == 0x50)
 			engine->instmem.flush	= nv50_instmem_flush;
 		else
@@ -345,7 +368,6 @@
 		engine->timer.takedown		= nv04_timer_takedown;
 		engine->fb.init			= nv50_fb_init;
 		engine->fb.takedown		= nv50_fb_takedown;
-		engine->graph.grclass		= nv50_graph_grclass;
 		engine->graph.init		= nv50_graph_init;
 		engine->graph.takedown		= nv50_graph_takedown;
 		engine->graph.fifo_access	= nv50_graph_fifo_access;
@@ -381,24 +403,32 @@
 		engine->display.init		= nv50_display_init;
 		engine->display.destroy		= nv50_display_destroy;
 		engine->gpio.init		= nv50_gpio_init;
-		engine->gpio.takedown		= nouveau_stub_takedown;
+		engine->gpio.takedown		= nv50_gpio_fini;
 		engine->gpio.get		= nv50_gpio_get;
 		engine->gpio.set		= nv50_gpio_set;
+		engine->gpio.irq_register	= nv50_gpio_irq_register;
+		engine->gpio.irq_unregister	= nv50_gpio_irq_unregister;
 		engine->gpio.irq_enable		= nv50_gpio_irq_enable;
 		switch (dev_priv->chipset) {
-		case 0xa3:
-		case 0xa5:
-		case 0xa8:
-		case 0xaf:
-			engine->pm.clock_get	= nva3_pm_clock_get;
-			engine->pm.clock_pre	= nva3_pm_clock_pre;
-			engine->pm.clock_set	= nva3_pm_clock_set;
-			break;
-		default:
+		case 0x84:
+		case 0x86:
+		case 0x92:
+		case 0x94:
+		case 0x96:
+		case 0x98:
+		case 0xa0:
+		case 0xaa:
+		case 0xac:
+		case 0x50:
 			engine->pm.clock_get	= nv50_pm_clock_get;
 			engine->pm.clock_pre	= nv50_pm_clock_pre;
 			engine->pm.clock_set	= nv50_pm_clock_set;
 			break;
+		default:
+			engine->pm.clock_get	= nva3_pm_clock_get;
+			engine->pm.clock_pre	= nva3_pm_clock_pre;
+			engine->pm.clock_set	= nva3_pm_clock_set;
+			break;
 		}
 		engine->pm.voltage_get		= nouveau_voltage_gpio_get;
 		engine->pm.voltage_set		= nouveau_voltage_gpio_set;
@@ -406,16 +436,38 @@
 			engine->pm.temp_get	= nv84_temp_get;
 		else
 			engine->pm.temp_get	= nv40_temp_get;
+		switch (dev_priv->chipset) {
+		case 0x84:
+		case 0x86:
+		case 0x92:
+		case 0x94:
+		case 0x96:
+		case 0xa0:
+			engine->crypt.init	= nv84_crypt_init;
+			engine->crypt.takedown	= nv84_crypt_fini;
+			engine->crypt.create_context = nv84_crypt_create_context;
+			engine->crypt.destroy_context = nv84_crypt_destroy_context;
+			engine->crypt.tlb_flush	= nv84_crypt_tlb_flush;
+			break;
+		default:
+			engine->crypt.init	= nouveau_stub_init;
+			engine->crypt.takedown	= nouveau_stub_takedown;
+			break;
+		}
+		engine->vram.init		= nv50_vram_init;
+		engine->vram.get		= nv50_vram_new;
+		engine->vram.put		= nv50_vram_del;
+		engine->vram.flags_valid	= nv50_vram_flags_valid;
 		break;
 	case 0xC0:
 		engine->instmem.init		= nvc0_instmem_init;
 		engine->instmem.takedown	= nvc0_instmem_takedown;
 		engine->instmem.suspend		= nvc0_instmem_suspend;
 		engine->instmem.resume		= nvc0_instmem_resume;
-		engine->instmem.populate	= nvc0_instmem_populate;
-		engine->instmem.clear		= nvc0_instmem_clear;
-		engine->instmem.bind		= nvc0_instmem_bind;
-		engine->instmem.unbind		= nvc0_instmem_unbind;
+		engine->instmem.get		= nvc0_instmem_get;
+		engine->instmem.put		= nvc0_instmem_put;
+		engine->instmem.map		= nvc0_instmem_map;
+		engine->instmem.unmap		= nvc0_instmem_unmap;
 		engine->instmem.flush		= nvc0_instmem_flush;
 		engine->mc.init			= nv50_mc_init;
 		engine->mc.takedown		= nv50_mc_takedown;
@@ -424,7 +476,6 @@
 		engine->timer.takedown		= nv04_timer_takedown;
 		engine->fb.init			= nvc0_fb_init;
 		engine->fb.takedown		= nvc0_fb_takedown;
-		engine->graph.grclass		= NULL;  //nvc0_graph_grclass;
 		engine->graph.init		= nvc0_graph_init;
 		engine->graph.takedown		= nvc0_graph_takedown;
 		engine->graph.fifo_access	= nvc0_graph_fifo_access;
@@ -453,7 +504,13 @@
 		engine->gpio.takedown		= nouveau_stub_takedown;
 		engine->gpio.get		= nv50_gpio_get;
 		engine->gpio.set		= nv50_gpio_set;
+		engine->gpio.irq_register	= nv50_gpio_irq_register;
+		engine->gpio.irq_unregister	= nv50_gpio_irq_unregister;
 		engine->gpio.irq_enable		= nv50_gpio_irq_enable;
+		engine->crypt.init		= nouveau_stub_init;
+		engine->crypt.takedown		= nouveau_stub_takedown;
+		engine->vram.init		= nouveau_mem_detect;
+		engine->vram.flags_valid	= nouveau_mem_flags_valid;
 		break;
 	default:
 		NV_ERROR(dev, "NV%02x unsupported\n", dev_priv->chipset);
@@ -495,7 +552,7 @@
 
 	ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY,
 				     0, dev_priv->vram_size,
-				     NV_DMA_ACCESS_RW, NV_DMA_TARGET_VIDMEM,
+				     NV_MEM_ACCESS_RW, NV_MEM_TARGET_VRAM,
 				     &gpuobj);
 	if (ret)
 		goto out_err;
@@ -505,9 +562,10 @@
 	if (ret)
 		goto out_err;
 
-	ret = nouveau_gpuobj_gart_dma_new(dev_priv->channel, 0,
-					  dev_priv->gart_info.aper_size,
-					  NV_DMA_ACCESS_RW, &gpuobj, NULL);
+	ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY,
+				     0, dev_priv->gart_info.aper_size,
+				     NV_MEM_ACCESS_RW, NV_MEM_TARGET_GART,
+				     &gpuobj);
 	if (ret)
 		goto out_err;
 
@@ -516,11 +574,11 @@
 	if (ret)
 		goto out_err;
 
+	mutex_unlock(&dev_priv->channel->mutex);
 	return 0;
 
 out_err:
-	nouveau_channel_free(dev_priv->channel);
-	dev_priv->channel = NULL;
+	nouveau_channel_put(&dev_priv->channel);
 	return ret;
 }
 
@@ -567,6 +625,8 @@
 	if (ret)
 		goto out;
 	engine = &dev_priv->engine;
+	spin_lock_init(&dev_priv->channels.lock);
+	spin_lock_init(&dev_priv->tile.lock);
 	spin_lock_init(&dev_priv->context_switch_lock);
 
 	/* Make the CRTCs and I2C buses accessible */
@@ -625,26 +685,28 @@
 		if (ret)
 			goto out_fb;
 
+		/* PCRYPT */
+		ret = engine->crypt.init(dev);
+		if (ret)
+			goto out_graph;
+
 		/* PFIFO */
 		ret = engine->fifo.init(dev);
 		if (ret)
-			goto out_graph;
+			goto out_crypt;
 	}
 
 	ret = engine->display.create(dev);
 	if (ret)
 		goto out_fifo;
 
-	/* this call irq_preinstall, register irq handler and
-	 * call irq_postinstall
-	 */
-	ret = drm_irq_install(dev);
+	ret = drm_vblank_init(dev, nv_two_heads(dev) ? 2 : 1);
 	if (ret)
-		goto out_display;
+		goto out_vblank;
 
-	ret = drm_vblank_init(dev, 0);
+	ret = nouveau_irq_init(dev);
 	if (ret)
-		goto out_irq;
+		goto out_vblank;
 
 	/* what about PVIDEO/PCRTC/PRAMDAC etc? */
 
@@ -669,12 +731,16 @@
 out_fence:
 	nouveau_fence_fini(dev);
 out_irq:
-	drm_irq_uninstall(dev);
-out_display:
+	nouveau_irq_fini(dev);
+out_vblank:
+	drm_vblank_cleanup(dev);
 	engine->display.destroy(dev);
 out_fifo:
 	if (!nouveau_noaccel)
 		engine->fifo.takedown(dev);
+out_crypt:
+	if (!nouveau_noaccel)
+		engine->crypt.takedown(dev);
 out_graph:
 	if (!nouveau_noaccel)
 		engine->graph.takedown(dev);
@@ -713,12 +779,12 @@
 
 	if (!engine->graph.accel_blocked) {
 		nouveau_fence_fini(dev);
-		nouveau_channel_free(dev_priv->channel);
-		dev_priv->channel = NULL;
+		nouveau_channel_put_unlocked(&dev_priv->channel);
 	}
 
 	if (!nouveau_noaccel) {
 		engine->fifo.takedown(dev);
+		engine->crypt.takedown(dev);
 		engine->graph.takedown(dev);
 	}
 	engine->fb.takedown(dev);
@@ -737,7 +803,8 @@
 	nouveau_gpuobj_takedown(dev);
 	nouveau_mem_vram_fini(dev);
 
-	drm_irq_uninstall(dev);
+	nouveau_irq_fini(dev);
+	drm_vblank_cleanup(dev);
 
 	nouveau_pm_fini(dev);
 	nouveau_bios_takedown(dev);
@@ -1024,21 +1091,6 @@
 		else
 			getparam->value = NV_PCI;
 		break;
-	case NOUVEAU_GETPARAM_FB_PHYSICAL:
-		getparam->value = dev_priv->fb_phys;
-		break;
-	case NOUVEAU_GETPARAM_AGP_PHYSICAL:
-		getparam->value = dev_priv->gart_info.aper_base;
-		break;
-	case NOUVEAU_GETPARAM_PCI_PHYSICAL:
-		if (dev->sg) {
-			getparam->value = (unsigned long)dev->sg->virtual;
-		} else {
-			NV_ERROR(dev, "Requested PCIGART address, "
-					"while no PCIGART was created\n");
-			return -EINVAL;
-		}
-		break;
 	case NOUVEAU_GETPARAM_FB_SIZE:
 		getparam->value = dev_priv->fb_available_size;
 		break;
@@ -1046,7 +1098,7 @@
 		getparam->value = dev_priv->gart_info.aper_size;
 		break;
 	case NOUVEAU_GETPARAM_VM_VRAM_BASE:
-		getparam->value = dev_priv->vm_vram_base;
+		getparam->value = 0; /* deprecated */
 		break;
 	case NOUVEAU_GETPARAM_PTIMER_TIME:
 		getparam->value = dev_priv->engine.timer.read(dev);
@@ -1054,6 +1106,9 @@
 	case NOUVEAU_GETPARAM_HAS_BO_USAGE:
 		getparam->value = 1;
 		break;
+	case NOUVEAU_GETPARAM_HAS_PAGEFLIP:
+		getparam->value = (dev_priv->card_type < NV_50);
+		break;
 	case NOUVEAU_GETPARAM_GRAPH_UNITS:
 		/* NV40 and NV50 versions are quite different, but register
 		 * address is the same. User is supposed to know the card
@@ -1087,8 +1142,9 @@
 }
 
 /* Wait until (value(reg) & mask) == val, up until timeout has hit */
-bool nouveau_wait_until(struct drm_device *dev, uint64_t timeout,
-			uint32_t reg, uint32_t mask, uint32_t val)
+bool
+nouveau_wait_eq(struct drm_device *dev, uint64_t timeout,
+		uint32_t reg, uint32_t mask, uint32_t val)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
@@ -1102,10 +1158,33 @@
 	return false;
 }
 
+/* Wait until (value(reg) & mask) != val, up until timeout has hit */
+bool
+nouveau_wait_ne(struct drm_device *dev, uint64_t timeout,
+		uint32_t reg, uint32_t mask, uint32_t val)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
+	uint64_t start = ptimer->read(dev);
+
+	do {
+		if ((nv_rd32(dev, reg) & mask) != val)
+			return true;
+	} while (ptimer->read(dev) - start < timeout);
+
+	return false;
+}
+
 /* Waits for PGRAPH to go completely idle */
 bool nouveau_wait_for_idle(struct drm_device *dev)
 {
-	if (!nv_wait(dev, NV04_PGRAPH_STATUS, 0xffffffff, 0x00000000)) {
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	uint32_t mask = ~0;
+
+	if (dev_priv->card_type == NV_40)
+		mask &= ~NV40_PGRAPH_STATUS_SYNC_STALL;
+
+	if (!nv_wait(dev, NV04_PGRAPH_STATUS, mask, 0)) {
 		NV_ERROR(dev, "PGRAPH idle timed out with status 0x%08x\n",
 			 nv_rd32(dev, NV04_PGRAPH_STATUS));
 		return false;
diff --git a/drivers/gpu/drm/nouveau/nouveau_util.c b/drivers/gpu/drm/nouveau/nouveau_util.c
new file mode 100644
index 0000000..fbe0fb1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_util.c
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2010 Nouveau Project
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/ratelimit.h>
+
+#include "nouveau_util.h"
+
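+/* allow bursts of up to 20 messages every 3 seconds from noisy paths */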
+static DEFINE_RATELIMIT_STATE(nouveau_ratelimit_state, 3 * HZ, 20);
+
+void
+nouveau_bitfield_print(const struct nouveau_bitfield *bf, u32 value)
+{
+	while (bf->name) {
+		if (value & bf->mask) {
+			printk(" %s", bf->name);
+			value &= ~bf->mask;
+		}
+
+		bf++;
+	}
+
+	if (value)
+		printk(" (unknown bits 0x%08x)", value);
+}
+
+void
+nouveau_enum_print(const struct nouveau_enum *en, u32 value)
+{
+	while (en->name) {
+		if (value == en->value) {
+			printk("%s", en->name);
+			return;
+		}
+
+		en++;
+	}
+
+	printk("(unknown enum 0x%08x)", value);
+}
+
+int
+nouveau_ratelimit(void)
+{
+	return __ratelimit(&nouveau_ratelimit_state);
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_util.h b/drivers/gpu/drm/nouveau/nouveau_util.h
new file mode 100644
index 0000000..d9ceaea
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_util.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2010 Nouveau Project
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __NOUVEAU_UTIL_H__
+#define __NOUVEAU_UTIL_H__
+
+struct nouveau_bitfield {
+	u32 mask;
+	const char *name;
+};
+
+struct nouveau_enum {
+	u32 value;
+	const char *name;
+};
+
+void nouveau_bitfield_print(const struct nouveau_bitfield *, u32 value);
+void nouveau_enum_print(const struct nouveau_enum *, u32 value);
+int nouveau_ratelimit(void);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.c b/drivers/gpu/drm/nouveau/nouveau_vm.c
new file mode 100644
index 0000000..07ab174
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_vm.c
@@ -0,0 +1,421 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_mm.h"
+#include "nouveau_vm.h"
+
+void
+nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_vram *vram)
+{
+	struct nouveau_vm *vm = vma->vm;
+	struct nouveau_mm_node *r;
+	u32 offset = vma->node->offset + (delta >> 12);
+	u32 bits = vma->node->type - 12;
+	u32 pde  = (offset >> vm->pgt_bits) - vm->fpde;
+	u32 pte  = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
+	u32 max  = 1 << (vm->pgt_bits - bits);
+	u32 end, len;
+
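+	/* walk each region of the allocation, stepping into the next page
+	 * table whenever the write crosses a PDE boundary */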
+	list_for_each_entry(r, &vram->regions, rl_entry) {
+		u64 phys = (u64)r->offset << 12;
+		u32 num  = r->length >> bits;
+
+		while (num) {
+			struct nouveau_gpuobj *pgt = vm->pgt[pde].obj;
+
+			end = (pte + num);
+			if (unlikely(end >= max))
+				end = max;
+			len = end - pte;
+
+			vm->map(vma, pgt, vram, pte, len, phys);
+
+			num -= len;
+			pte += len;
+			if (unlikely(end >= max)) {
+				pde++;
+				pte = 0;
+			}
+		}
+	}
+
+	vm->flush(vm);
+}
+
+void
+nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_vram *vram)
+{
+	nouveau_vm_map_at(vma, 0, vram);
+}
+
+void
+nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
+		  dma_addr_t *list)
+{
+	struct nouveau_vm *vm = vma->vm;
+	u32 offset = vma->node->offset + (delta >> 12);
+	u32 bits = vma->node->type - 12;
+	u32 num  = length >> vma->node->type;
+	u32 pde  = (offset >> vm->pgt_bits) - vm->fpde;
+	u32 pte  = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
+	u32 max  = 1 << (vm->pgt_bits - bits);
+	u32 end, len;
+
+	while (num) {
+		struct nouveau_gpuobj *pgt = vm->pgt[pde].obj;
+
+		end = (pte + num);
+		if (unlikely(end >= max))
+			end = max;
+		len = end - pte;
+
+		vm->map_sg(vma, pgt, pte, list, len);
+
+		num  -= len;
+		pte  += len;
+		list += len;
+		if (unlikely(end >= max)) {
+			pde++;
+			pte = 0;
+		}
+	}
+
+	vm->flush(vm);
+}
+
+void
+nouveau_vm_unmap_at(struct nouveau_vma *vma, u64 delta, u64 length)
+{
+	struct nouveau_vm *vm = vma->vm;
+	u32 offset = vma->node->offset + (delta >> 12);
+	u32 bits = vma->node->type - 12;
+	u32 num  = length >> vma->node->type;
+	u32 pde  = (offset >> vm->pgt_bits) - vm->fpde;
+	u32 pte  = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
+	u32 max  = 1 << (vm->pgt_bits - bits);
+	u32 end, len;
+
+	while (num) {
+		struct nouveau_gpuobj *pgt = vm->pgt[pde].obj;
+
+		end = (pte + num);
+		if (unlikely(end >= max))
+			end = max;
+		len = end - pte;
+
+		vm->unmap(pgt, pte, len);
+
+		num -= len;
+		pte += len;
+		if (unlikely(end >= max)) {
+			pde++;
+			pte = 0;
+		}
+	}
+
+	vm->flush(vm);
+}
+
+void
+nouveau_vm_unmap(struct nouveau_vma *vma)
+{
+	nouveau_vm_unmap_at(vma, 0, (u64)vma->node->length << 12);
+}
+
+static void
+nouveau_vm_unmap_pgt(struct nouveau_vm *vm, u32 fpde, u32 lpde)
+{
+	struct nouveau_vm_pgd *vpgd;
+	struct nouveau_vm_pgt *vpgt;
+	struct nouveau_gpuobj *pgt;
+	u32 pde;
+
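+	/* drop a reference on every page table in [fpde, lpde]; free any
+	 * that reach zero and clear their PDEs in all page directories */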
+	for (pde = fpde; pde <= lpde; pde++) {
+		vpgt = &vm->pgt[pde - vm->fpde];
+		if (--vpgt->refcount)
+			continue;
+
+		list_for_each_entry(vpgd, &vm->pgd_list, head) {
+			vm->unmap_pgt(vpgd->obj, pde);
+		}
+
+		pgt = vpgt->obj;
+		vpgt->obj = NULL;
+
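+		/* release the final reference without the mm mutex held,
+		 * mirroring the allocation path in nouveau_vm_map_pgt() */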
+		mutex_unlock(&vm->mm->mutex);
+		nouveau_gpuobj_ref(NULL, &pgt);
+		mutex_lock(&vm->mm->mutex);
+	}
+}
+
+static int
+nouveau_vm_map_pgt(struct nouveau_vm *vm, u32 pde, u32 type)
+{
+	struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
+	struct nouveau_vm_pgd *vpgd;
+	struct nouveau_gpuobj *pgt;
+	u32 pgt_size;
+	int ret;
+
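+	/* a PDE spans 1 << (pgt_bits + 12) bytes; dividing by the page size
+	 * (1 << type) gives the PTE count, at 8 bytes per PTE */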
+	pgt_size  = (1 << (vm->pgt_bits + 12)) >> type;
+	pgt_size *= 8;
+
+	mutex_unlock(&vm->mm->mutex);
+	ret = nouveau_gpuobj_new(vm->dev, NULL, pgt_size, 0x1000,
+				 NVOBJ_FLAG_ZERO_ALLOC, &pgt);
+	mutex_lock(&vm->mm->mutex);
+	if (unlikely(ret))
+		return ret;
+
+	/* someone beat us to filling the PDE while we didn't have the lock */
+	if (unlikely(vpgt->refcount++)) {
+		mutex_unlock(&vm->mm->mutex);
+		nouveau_gpuobj_ref(NULL, &pgt);
+		mutex_lock(&vm->mm->mutex);
+		return 0;
+	}
+
+	list_for_each_entry(vpgd, &vm->pgd_list, head) {
+		vm->map_pgt(vpgd->obj, type, pde, pgt);
+	}
+
+	vpgt->page_shift = type;
+	vpgt->obj = pgt;
+	return 0;
+}
+
+int
+nouveau_vm_get(struct nouveau_vm *vm, u64 size, u32 page_shift,
+	       u32 access, struct nouveau_vma *vma)
+{
+	u32 align = (1 << page_shift) >> 12;
+	u32 msize = size >> 12;
+	u32 fpde, lpde, pde;
+	int ret;
+
+	mutex_lock(&vm->mm->mutex);
+	ret = nouveau_mm_get(vm->mm, page_shift, msize, 0, align, &vma->node);
+	if (unlikely(ret != 0)) {
+		mutex_unlock(&vm->mm->mutex);
+		return ret;
+	}
+
+	fpde = (vma->node->offset >> vm->pgt_bits);
+	lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits;
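+	/* take a reference on each page table the allocation spans,
+	 * creating any that don't exist yet */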
+	for (pde = fpde; pde <= lpde; pde++) {
+		struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
+
+		if (likely(vpgt->refcount)) {
+			vpgt->refcount++;
+			continue;
+		}
+
+		ret = nouveau_vm_map_pgt(vm, pde, vma->node->type);
+		if (ret) {
+			if (pde != fpde)
+				nouveau_vm_unmap_pgt(vm, fpde, pde - 1);
+			nouveau_mm_put(vm->mm, vma->node);
+			mutex_unlock(&vm->mm->mutex);
+			vma->node = NULL;
+			return ret;
+		}
+	}
+	mutex_unlock(&vm->mm->mutex);
+
+	vma->vm     = vm;
+	vma->offset = (u64)vma->node->offset << 12;
+	vma->access = access;
+	return 0;
+}
+
+void
+nouveau_vm_put(struct nouveau_vma *vma)
+{
+	struct nouveau_vm *vm = vma->vm;
+	u32 fpde, lpde;
+
+	if (unlikely(vma->node == NULL))
+		return;
+	fpde = (vma->node->offset >> vm->pgt_bits);
+	lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits;
+
+	mutex_lock(&vm->mm->mutex);
+	nouveau_mm_put(vm->mm, vma->node);
+	vma->node = NULL;
+	nouveau_vm_unmap_pgt(vm, fpde, lpde);
+	mutex_unlock(&vm->mm->mutex);
+}
+
+int
+nouveau_vm_new(struct drm_device *dev, u64 offset, u64 length, u64 mm_offset,
+	       u8 pgt_bits, u8 spg_shift, u8 lpg_shift,
+	       struct nouveau_vm **pvm)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_vm *vm;
+	u64 mm_length = (offset + length) - mm_offset;
+	u32 block;
+	int ret;
+
+	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
+	if (!vm)
+		return -ENOMEM;
+
+	if (dev_priv->card_type == NV_50) {
+		vm->map_pgt = nv50_vm_map_pgt;
+		vm->unmap_pgt = nv50_vm_unmap_pgt;
+		vm->map = nv50_vm_map;
+		vm->map_sg = nv50_vm_map_sg;
+		vm->unmap = nv50_vm_unmap;
+		vm->flush = nv50_vm_flush;
+	} else {
+		kfree(vm);
+		return -ENOSYS;
+	}
+
+	vm->fpde   = offset >> pgt_bits;
+	vm->lpde   = (offset + length - 1) >> pgt_bits;
+	vm->pgt = kcalloc(vm->lpde - vm->fpde + 1, sizeof(*vm->pgt), GFP_KERNEL);
+	if (!vm->pgt) {
+		kfree(vm);
+		return -ENOMEM;
+	}
+
+	INIT_LIST_HEAD(&vm->pgd_list);
+	vm->dev = dev;
+	vm->refcount = 1;
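+	/* callers pass log2(bytes per PDE); store log2(pages per PDE) */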
+	vm->pgt_bits = pgt_bits - 12;
+	vm->spg_shift = spg_shift;
+	vm->lpg_shift = lpg_shift;
+
+	block = (1 << pgt_bits);
+	if (length < block)
+		block = length;
+
+	ret = nouveau_mm_init(&vm->mm, mm_offset >> 12, mm_length >> 12,
+			      block >> 12);
+	if (ret) {
+		kfree(vm);
+		return ret;
+	}
+
+	*pvm = vm;
+	return 0;
+}
+
+static int
+nouveau_vm_link(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd)
+{
+	struct nouveau_vm_pgd *vpgd;
+	int i;
+
+	if (!pgd)
+		return 0;
+
+	vpgd = kzalloc(sizeof(*vpgd), GFP_KERNEL);
+	if (!vpgd)
+		return -ENOMEM;
+
+	nouveau_gpuobj_ref(pgd, &vpgd->obj);
+
+	mutex_lock(&vm->mm->mutex);
+	for (i = vm->fpde; i <= vm->lpde; i++) {
+		struct nouveau_vm_pgt *vpgt = &vm->pgt[i - vm->fpde];
+
+		if (!vpgt->obj) {
+			vm->unmap_pgt(pgd, i);
+			continue;
+		}
+
+		vm->map_pgt(pgd, vpgt->page_shift, i, vpgt->obj);
+	}
+	list_add(&vpgd->head, &vm->pgd_list);
+	mutex_unlock(&vm->mm->mutex);
+	return 0;
+}
+
+static void
+nouveau_vm_unlink(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd)
+{
+	struct nouveau_vm_pgd *vpgd, *tmp;
+
+	if (!pgd)
+		return;
+
+	mutex_lock(&vm->mm->mutex);
+	list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
+		if (vpgd->obj != pgd)
+			continue;
+
+		list_del(&vpgd->head);
+		nouveau_gpuobj_ref(NULL, &vpgd->obj);
+		kfree(vpgd);
+	}
+	mutex_unlock(&vm->mm->mutex);
+}
+
+static void
+nouveau_vm_del(struct nouveau_vm *vm)
+{
+	struct nouveau_vm_pgd *vpgd, *tmp;
+
+	list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
+		nouveau_vm_unlink(vm, vpgd->obj);
+	}
+	WARN_ON(nouveau_mm_fini(&vm->mm) != 0);
+
+	kfree(vm->pgt);
+	kfree(vm);
+}
+
+int
+nouveau_vm_ref(struct nouveau_vm *ref, struct nouveau_vm **ptr,
+	       struct nouveau_gpuobj *pgd)
+{
+	struct nouveau_vm *vm;
+	int ret;
+
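+	/* link the new VM into the PGD (and reference it) before dropping
+	 * the old one, so a VM shared by both is never torn down early */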
+	vm = ref;
+	if (vm) {
+		ret = nouveau_vm_link(vm, pgd);
+		if (ret)
+			return ret;
+
+		vm->refcount++;
+	}
+
+	vm = *ptr;
+	*ptr = ref;
+
+	if (vm) {
+		nouveau_vm_unlink(vm, pgd);
+
+		if (--vm->refcount == 0)
+			nouveau_vm_del(vm);
+	}
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.h b/drivers/gpu/drm/nouveau/nouveau_vm.h
new file mode 100644
index 0000000..b6755cf
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_vm.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#ifndef __NOUVEAU_VM_H__
+#define __NOUVEAU_VM_H__
+
+#include "drmP.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_mm.h"
+
+struct nouveau_vm_pgt {
+	struct nouveau_gpuobj *obj;
+	u32 page_shift;
+	u32 refcount;
+};
+
+struct nouveau_vm_pgd {
+	struct list_head head;
+	struct nouveau_gpuobj *obj;
+};
+
+struct nouveau_vma {
+	struct nouveau_vm *vm;
+	struct nouveau_mm_node *node;
+	u64 offset;
+	u32 access;
+};
+
+struct nouveau_vm {
+	struct drm_device *dev;
+	struct nouveau_mm *mm;
+	int refcount;
+
+	struct list_head pgd_list;
+	atomic_t pgraph_refs;
+	atomic_t pcrypt_refs;
+
+	struct nouveau_vm_pgt *pgt;
+	u32 fpde;
+	u32 lpde;
+
+	u32 pgt_bits;
+	u8  spg_shift;
+	u8  lpg_shift;
+
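+	/* chipset backends; only the nv50 implementations exist so far */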
+	void (*map_pgt)(struct nouveau_gpuobj *pgd, u32 type, u32 pde,
+			struct nouveau_gpuobj *pgt);
+	void (*unmap_pgt)(struct nouveau_gpuobj *pgd, u32 pde);
+	void (*map)(struct nouveau_vma *, struct nouveau_gpuobj *,
+		    struct nouveau_vram *, u32 pte, u32 cnt, u64 phys);
+	void (*map_sg)(struct nouveau_vma *, struct nouveau_gpuobj *,
+		       u32 pte, dma_addr_t *, u32 cnt);
+	void (*unmap)(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt);
+	void (*flush)(struct nouveau_vm *);
+};
+
+/* nouveau_vm.c */
+int  nouveau_vm_new(struct drm_device *, u64 offset, u64 length, u64 mm_offset,
+		    u8 pgt_bits, u8 spg_shift, u8 lpg_shift,
+		    struct nouveau_vm **);
+int  nouveau_vm_ref(struct nouveau_vm *, struct nouveau_vm **,
+		    struct nouveau_gpuobj *pgd);
+int  nouveau_vm_get(struct nouveau_vm *, u64 size, u32 page_shift,
+		    u32 access, struct nouveau_vma *);
+void nouveau_vm_put(struct nouveau_vma *);
+void nouveau_vm_map(struct nouveau_vma *, struct nouveau_vram *);
+void nouveau_vm_map_at(struct nouveau_vma *, u64 offset, struct nouveau_vram *);
+void nouveau_vm_unmap(struct nouveau_vma *);
+void nouveau_vm_unmap_at(struct nouveau_vma *, u64 offset, u64 length);
+void nouveau_vm_map_sg(struct nouveau_vma *, u64 offset, u64 length,
+		       dma_addr_t *);
+
+/* nv50_vm.c */
+void nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 type, u32 pde,
+		     struct nouveau_gpuobj *pgt);
+void nv50_vm_unmap_pgt(struct nouveau_gpuobj *pgd, u32 pde);
+void nv50_vm_map(struct nouveau_vma *, struct nouveau_gpuobj *,
+		 struct nouveau_vram *, u32 pte, u32 cnt, u64 phys);
+void nv50_vm_map_sg(struct nouveau_vma *, struct nouveau_gpuobj *,
+		    u32 pte, dma_addr_t *, u32 cnt);
+void nv50_vm_unmap(struct nouveau_gpuobj *, u32 pte, u32 cnt);
+void nv50_vm_flush(struct nouveau_vm *);
+void nv50_vm_flush_engine(struct drm_device *, int engine);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c
index 40e1807..297505e 100644
--- a/drivers/gpu/drm/nouveau/nv04_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv04_crtc.c
@@ -551,7 +551,10 @@
 	if (dev_priv->card_type >= NV_30)
 		regp->gpio_ext = NVReadCRTC(dev, 0, NV_PCRTC_GPIO_EXT);
 
-	regp->crtc_cfg = NV_PCRTC_CONFIG_START_ADDRESS_HSYNC;
+	if (dev_priv->card_type >= NV_10)
+		regp->crtc_cfg = NV10_PCRTC_CONFIG_START_ADDRESS_HSYNC;
+	else
+		regp->crtc_cfg = NV04_PCRTC_CONFIG_START_ADDRESS_HSYNC;
 
 	/* Some misc regs */
 	if (dev_priv->card_type == NV_40) {
@@ -669,6 +672,7 @@
 	if (nv_two_heads(dev))
 		NVSetOwner(dev, nv_crtc->index);
 
+	drm_vblank_pre_modeset(dev, nv_crtc->index);
 	funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
 
 	NVBlankScreen(dev, nv_crtc->index, true);
@@ -701,6 +705,7 @@
 #endif
 
 	funcs->dpms(crtc, DRM_MODE_DPMS_ON);
+	drm_vblank_post_modeset(dev, nv_crtc->index);
 }
 
 static void nv_crtc_destroy(struct drm_crtc *crtc)
@@ -986,6 +991,7 @@
 	.cursor_move = nv04_crtc_cursor_move,
 	.gamma_set = nv_crtc_gamma_set,
 	.set_config = drm_crtc_helper_set_config,
+	.page_flip = nouveau_crtc_page_flip,
 	.destroy = nv_crtc_destroy,
 };
 
diff --git a/drivers/gpu/drm/nouveau/nv04_dac.c b/drivers/gpu/drm/nouveau/nv04_dac.c
index ba6423f..e000455 100644
--- a/drivers/gpu/drm/nouveau/nv04_dac.c
+++ b/drivers/gpu/drm/nouveau/nv04_dac.c
@@ -74,14 +74,14 @@
 		 * use a 10ms timeout (guards against crtc being inactive, in
 		 * which case blank state would never change)
 		 */
-		if (!nouveau_wait_until(dev, 10000000, NV_PRMCIO_INP0__COLOR,
-					0x00000001, 0x00000000))
+		if (!nouveau_wait_eq(dev, 10000000, NV_PRMCIO_INP0__COLOR,
+				     0x00000001, 0x00000000))
 			return -EBUSY;
-		if (!nouveau_wait_until(dev, 10000000, NV_PRMCIO_INP0__COLOR,
-					0x00000001, 0x00000001))
+		if (!nouveau_wait_eq(dev, 10000000, NV_PRMCIO_INP0__COLOR,
+				     0x00000001, 0x00000001))
 			return -EBUSY;
-		if (!nouveau_wait_until(dev, 10000000, NV_PRMCIO_INP0__COLOR,
-					0x00000001, 0x00000000))
+		if (!nouveau_wait_eq(dev, 10000000, NV_PRMCIO_INP0__COLOR,
+				     0x00000001, 0x00000000))
 			return -EBUSY;
 
 		udelay(100);
diff --git a/drivers/gpu/drm/nouveau/nv04_display.c b/drivers/gpu/drm/nouveau/nv04_display.c
index 9e28cf7..1715e14 100644
--- a/drivers/gpu/drm/nouveau/nv04_display.c
+++ b/drivers/gpu/drm/nouveau/nv04_display.c
@@ -32,6 +32,9 @@
 #include "nouveau_encoder.h"
 #include "nouveau_connector.h"
 
+static void nv04_vblank_crtc0_isr(struct drm_device *);
+static void nv04_vblank_crtc1_isr(struct drm_device *);
+
 static void
 nv04_display_store_initial_head_owner(struct drm_device *dev)
 {
@@ -197,6 +200,8 @@
 		func->save(encoder);
 	}
 
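+	/* CRTC0/1 vblank interrupts dispatch on IRQ lines 24 and 25 */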
+	nouveau_irq_register(dev, 24, nv04_vblank_crtc0_isr);
+	nouveau_irq_register(dev, 25, nv04_vblank_crtc1_isr);
 	return 0;
 }
 
@@ -208,6 +213,9 @@
 
 	NV_DEBUG_KMS(dev, "\n");
 
+	nouveau_irq_unregister(dev, 24);
+	nouveau_irq_unregister(dev, 25);
+
 	/* Turn every CRTC off. */
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 		struct drm_mode_set modeset = {
@@ -258,3 +266,16 @@
 	return 0;
 }
 
+static void
+nv04_vblank_crtc0_isr(struct drm_device *dev)
+{
+	nv_wr32(dev, NV_CRTC0_INTSTAT, NV_CRTC_INTR_VBLANK);
+	drm_handle_vblank(dev, 0);
+}
+
+static void
+nv04_vblank_crtc1_isr(struct drm_device *dev)
+{
+	nv_wr32(dev, NV_CRTC1_INTSTAT, NV_CRTC_INTR_VBLANK);
+	drm_handle_vblank(dev, 1);
+}
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index 33e4c93..7a11893 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -28,52 +28,39 @@
 #include "nouveau_ramht.h"
 #include "nouveau_fbcon.h"
 
-void
+int
 nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
 {
 	struct nouveau_fbdev *nfbdev = info->par;
 	struct drm_device *dev = nfbdev->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_channel *chan = dev_priv->channel;
+	int ret;
 
-	if (info->state != FBINFO_STATE_RUNNING)
-		return;
-
-	if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 4)) {
-		nouveau_fbcon_gpu_lockup(info);
-	}
-
-	if (info->flags & FBINFO_HWACCEL_DISABLED) {
-		cfb_copyarea(info, region);
-		return;
-	}
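+	/* errors are now returned to the caller, which is expected to fall
+	 * back to the software (cfb_*) rendering path */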
+	ret = RING_SPACE(chan, 4);
+	if (ret)
+		return ret;
 
 	BEGIN_RING(chan, NvSubImageBlit, 0x0300, 3);
 	OUT_RING(chan, (region->sy << 16) | region->sx);
 	OUT_RING(chan, (region->dy << 16) | region->dx);
 	OUT_RING(chan, (region->height << 16) | region->width);
 	FIRE_RING(chan);
+	return 0;
 }
 
-void
+int
 nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
 {
 	struct nouveau_fbdev *nfbdev = info->par;
 	struct drm_device *dev = nfbdev->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_channel *chan = dev_priv->channel;
+	int ret;
 
-	if (info->state != FBINFO_STATE_RUNNING)
-		return;
-
-	if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 7)) {
-		nouveau_fbcon_gpu_lockup(info);
-	}
-
-	if (info->flags & FBINFO_HWACCEL_DISABLED) {
-		cfb_fillrect(info, rect);
-		return;
-	}
+	ret = RING_SPACE(chan, 7);
+	if (ret)
+		return ret;
 
 	BEGIN_RING(chan, NvSubGdiRect, 0x02fc, 1);
 	OUT_RING(chan, (rect->rop != ROP_COPY) ? 1 : 3);
@@ -87,9 +74,10 @@
 	OUT_RING(chan, (rect->dx << 16) | rect->dy);
 	OUT_RING(chan, (rect->width << 16) | rect->height);
 	FIRE_RING(chan);
+	return 0;
 }
 
-void
+int
 nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
 {
 	struct nouveau_fbdev *nfbdev = info->par;
@@ -101,23 +89,14 @@
 	uint32_t dsize;
 	uint32_t width;
 	uint32_t *data = (uint32_t *)image->data;
+	int ret;
 
-	if (info->state != FBINFO_STATE_RUNNING)
-		return;
+	if (image->depth != 1)
+		return -ENODEV;
 
-	if (image->depth != 1) {
-		cfb_imageblit(info, image);
-		return;
-	}
-
-	if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 8)) {
-		nouveau_fbcon_gpu_lockup(info);
-	}
-
-	if (info->flags & FBINFO_HWACCEL_DISABLED) {
-		cfb_imageblit(info, image);
-		return;
-	}
+	ret = RING_SPACE(chan, 8);
+	if (ret)
+		return ret;
 
 	width = ALIGN(image->width, 8);
 	dsize = ALIGN(width * image->height, 32) >> 5;
@@ -144,11 +123,9 @@
 	while (dsize) {
 		int iter_len = dsize > 128 ? 128 : dsize;
 
-		if (RING_SPACE(chan, iter_len + 1)) {
-			nouveau_fbcon_gpu_lockup(info);
-			cfb_imageblit(info, image);
-			return;
-		}
+		ret = RING_SPACE(chan, iter_len + 1);
+		if (ret)
+			return ret;
 
 		BEGIN_RING(chan, NvSubGdiRect, 0x0c00, iter_len);
 		OUT_RINGp(chan, data, iter_len);
@@ -157,22 +134,7 @@
 	}
 
 	FIRE_RING(chan);
-}
-
-static int
-nv04_fbcon_grobj_new(struct drm_device *dev, int class, uint32_t handle)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_gpuobj *obj = NULL;
-	int ret;
-
-	ret = nouveau_gpuobj_gr_new(dev_priv->channel, class, &obj);
-	if (ret)
-		return ret;
-
-	ret = nouveau_ramht_insert(dev_priv->channel, handle, obj);
-	nouveau_gpuobj_ref(NULL, &obj);
-	return ret;
+	return 0;
 }
 
 int
@@ -214,29 +176,31 @@
 		return -EINVAL;
 	}
 
-	ret = nv04_fbcon_grobj_new(dev, dev_priv->card_type >= NV_10 ?
-				   0x0062 : 0x0042, NvCtxSurf2D);
+	ret = nouveau_gpuobj_gr_new(chan, NvCtxSurf2D,
+				    dev_priv->card_type >= NV_10 ?
+				    0x0062 : 0x0042);
 	if (ret)
 		return ret;
 
-	ret = nv04_fbcon_grobj_new(dev, 0x0019, NvClipRect);
+	ret = nouveau_gpuobj_gr_new(chan, NvClipRect, 0x0019);
 	if (ret)
 		return ret;
 
-	ret = nv04_fbcon_grobj_new(dev, 0x0043, NvRop);
+	ret = nouveau_gpuobj_gr_new(chan, NvRop, 0x0043);
 	if (ret)
 		return ret;
 
-	ret = nv04_fbcon_grobj_new(dev, 0x0044, NvImagePatt);
+	ret = nouveau_gpuobj_gr_new(chan, NvImagePatt, 0x0044);
 	if (ret)
 		return ret;
 
-	ret = nv04_fbcon_grobj_new(dev, 0x004a, NvGdiRect);
+	ret = nouveau_gpuobj_gr_new(chan, NvGdiRect, 0x004a);
 	if (ret)
 		return ret;
 
-	ret = nv04_fbcon_grobj_new(dev, dev_priv->chipset >= 0x11 ?
-				   0x009f : 0x005f, NvImageBlit);
+	ret = nouveau_gpuobj_gr_new(chan, NvImageBlit,
+				    dev_priv->chipset >= 0x11 ?
+				    0x009f : 0x005f);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/nouveau/nv04_fifo.c b/drivers/gpu/drm/nouveau/nv04_fifo.c
index 708293b..f89d104 100644
--- a/drivers/gpu/drm/nouveau/nv04_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv04_fifo.c
@@ -28,6 +28,7 @@
 #include "drm.h"
 #include "nouveau_drv.h"
 #include "nouveau_ramht.h"
+#include "nouveau_util.h"
 
 #define NV04_RAMFC(c) (dev_priv->ramfc->pinst + ((c) * NV04_RAMFC__SIZE))
 #define NV04_RAMFC__SIZE 32
@@ -128,6 +129,11 @@
 	if (ret)
 		return ret;
 
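+	/* map this channel's USER area (user-visible FIFO control regs) */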
+	chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
+			     NV03_USER(chan->id), PAGE_SIZE);
+	if (!chan->user)
+		return -ENOMEM;
+
 	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
 
 	/* Setup initial state */
@@ -151,10 +157,31 @@
 nv04_fifo_destroy_context(struct nouveau_channel *chan)
 {
 	struct drm_device *dev = chan->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+	unsigned long flags;
 
-	nv_wr32(dev, NV04_PFIFO_MODE,
-		nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id));
+	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+	pfifo->reassign(dev, false);
 
+	/* Unload the context if it's the currently active one */
+	if (pfifo->channel_id(dev) == chan->id) {
+		pfifo->disable(dev);
+		pfifo->unload_context(dev);
+		pfifo->enable(dev);
+	}
+
+	/* Keep it from being rescheduled */
+	nv_mask(dev, NV04_PFIFO_MODE, 1 << chan->id, 0);
+
+	pfifo->reassign(dev, true);
+	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+
+	/* Free the channel resources */
+	if (chan->user) {
+		iounmap(chan->user);
+		chan->user = NULL;
+	}
 	nouveau_gpuobj_ref(NULL, &chan->ramfc);
 }
 
@@ -208,7 +235,7 @@
 	if (chid < 0 || chid >= dev_priv->engine.fifo.channels)
 		return 0;
 
-	chan = dev_priv->fifos[chid];
+	chan = dev_priv->channels.ptr[chid];
 	if (!chan) {
 		NV_ERROR(dev, "Inactive channel on PFIFO: %d\n", chid);
 		return -EINVAL;
@@ -267,6 +294,7 @@
 static void
 nv04_fifo_init_intr(struct drm_device *dev)
 {
+	nouveau_irq_register(dev, 8, nv04_fifo_isr);
 	nv_wr32(dev, 0x002100, 0xffffffff);
 	nv_wr32(dev, 0x002140, 0xffffffff);
 }
@@ -289,7 +317,7 @@
 	pfifo->reassign(dev, true);
 
 	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
-		if (dev_priv->fifos[i]) {
+		if (dev_priv->channels.ptr[i]) {
 			uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE);
 			nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i));
 		}
@@ -298,3 +326,207 @@
 	return 0;
 }
 
+void
+nv04_fifo_fini(struct drm_device *dev)
+{
+	nv_wr32(dev, 0x2140, 0x00000000);
+	nouveau_irq_unregister(dev, 8);
+}
+
+static bool
+nouveau_fifo_swmthd(struct drm_device *dev, u32 chid, u32 addr, u32 data)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_channel *chan = NULL;
+	struct nouveau_gpuobj *obj;
+	unsigned long flags;
+	const int subc = (addr >> 13) & 0x7;
+	const int mthd = addr & 0x1ffc;
+	bool handled = false;
+	u32 engine;
+
+	spin_lock_irqsave(&dev_priv->channels.lock, flags);
+	if (likely(chid < dev_priv->engine.fifo.channels))
+		chan = dev_priv->channels.ptr[chid];
+	if (unlikely(!chan))
+		goto out;
+
+	switch (mthd) {
+	case 0x0000: /* bind object to subchannel */
+		obj = nouveau_ramht_find(chan, data);
+		if (unlikely(!obj || obj->engine != NVOBJ_ENGINE_SW))
+			break;
+
+		chan->sw_subchannel[subc] = obj->class;
+		engine = 0x0000000f << (subc * 4);
+
+		nv_mask(dev, NV04_PFIFO_CACHE1_ENGINE, engine, 0x00000000);
+		handled = true;
+		break;
+	default:
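+		/* other methods are only intercepted when the subchannel is
+		 * bound to a SW object (engine nibble cleared above) */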
+		engine = nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE);
+		if (unlikely(((engine >> (subc * 4)) & 0xf) != 0))
+			break;
+
+		if (!nouveau_gpuobj_mthd_call(chan, chan->sw_subchannel[subc],
+					      mthd, data))
+			handled = true;
+		break;
+	}
+
+out:
+	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
+	return handled;
+}
+
+void
+nv04_fifo_isr(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_engine *engine = &dev_priv->engine;
+	uint32_t status, reassign;
+	int cnt = 0;
+
+	reassign = nv_rd32(dev, NV03_PFIFO_CACHES) & 1;
+	while ((status = nv_rd32(dev, NV03_PFIFO_INTR_0)) && (cnt++ < 100)) {
+		uint32_t chid, get;
+
+		nv_wr32(dev, NV03_PFIFO_CACHES, 0);
+
+		chid = engine->fifo.channel_id(dev);
+		get  = nv_rd32(dev, NV03_PFIFO_CACHE1_GET);
+
+		if (status & NV_PFIFO_INTR_CACHE_ERROR) {
+			uint32_t mthd, data;
+			int ptr;
+
+			/* NV_PFIFO_CACHE1_GET actually goes to 0xffc before
+			 * wrapping on my G80 chips, but CACHE1 isn't big
+			 * enough for this much data.  Tests show that it
+			 * wraps around to the start at GET=0x800; no clue
+			 * as to why.
+			 */
+			ptr = (get & 0x7ff) >> 2;
+
+			if (dev_priv->card_type < NV_40) {
+				mthd = nv_rd32(dev,
+					NV04_PFIFO_CACHE1_METHOD(ptr));
+				data = nv_rd32(dev,
+					NV04_PFIFO_CACHE1_DATA(ptr));
+			} else {
+				mthd = nv_rd32(dev,
+					NV40_PFIFO_CACHE1_METHOD(ptr));
+				data = nv_rd32(dev,
+					NV40_PFIFO_CACHE1_DATA(ptr));
+			}
+
+			if (!nouveau_fifo_swmthd(dev, chid, mthd, data)) {
+				NV_INFO(dev, "PFIFO_CACHE_ERROR - Ch %d/%d "
+					     "Mthd 0x%04x Data 0x%08x\n",
+					chid, (mthd >> 13) & 7, mthd & 0x1ffc,
+					data);
+			}
+
+			nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
+			nv_wr32(dev, NV03_PFIFO_INTR_0,
+						NV_PFIFO_INTR_CACHE_ERROR);
+
+			nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
+				nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) & ~1);
+			nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
+			nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
+				nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) | 1);
+			nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0);
+
+			nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH,
+				nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
+			nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
+
+			status &= ~NV_PFIFO_INTR_CACHE_ERROR;
+		}
+
+		if (status & NV_PFIFO_INTR_DMA_PUSHER) {
+			u32 dma_get = nv_rd32(dev, 0x003244);
+			u32 dma_put = nv_rd32(dev, 0x003240);
+			u32 push = nv_rd32(dev, 0x003220);
+			u32 state = nv_rd32(dev, 0x003228);
+
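+			/* recover by advancing GET (and the nv50 IB pointer)
+			 * past the data the pusher choked on */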
+			if (dev_priv->card_type == NV_50) {
+				u32 ho_get = nv_rd32(dev, 0x003328);
+				u32 ho_put = nv_rd32(dev, 0x003320);
+				u32 ib_get = nv_rd32(dev, 0x003334);
+				u32 ib_put = nv_rd32(dev, 0x003330);
+
+				if (nouveau_ratelimit())
+					NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%02x%08x "
+						"Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x "
+						"State 0x%08x Push 0x%08x\n",
+						chid, ho_get, dma_get, ho_put,
+						dma_put, ib_get, ib_put, state,
+						push);
+
+				/* METHOD_COUNT, in DMA_STATE on earlier chipsets */
+				nv_wr32(dev, 0x003364, 0x00000000);
+				if (dma_get != dma_put || ho_get != ho_put) {
+					nv_wr32(dev, 0x003244, dma_put);
+					nv_wr32(dev, 0x003328, ho_put);
+				} else
+				if (ib_get != ib_put) {
+					nv_wr32(dev, 0x003334, ib_put);
+				}
+			} else {
+				NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%08x "
+					     "Put 0x%08x State 0x%08x Push 0x%08x\n",
+					chid, dma_get, dma_put, state, push);
+
+				if (dma_get != dma_put)
+					nv_wr32(dev, 0x003244, dma_put);
+			}
+
+			nv_wr32(dev, 0x003228, 0x00000000);
+			nv_wr32(dev, 0x003220, 0x00000001);
+			nv_wr32(dev, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
+			status &= ~NV_PFIFO_INTR_DMA_PUSHER;
+		}
+
+		if (status & NV_PFIFO_INTR_SEMAPHORE) {
+			uint32_t sem;
+
+			status &= ~NV_PFIFO_INTR_SEMAPHORE;
+			nv_wr32(dev, NV03_PFIFO_INTR_0,
+				NV_PFIFO_INTR_SEMAPHORE);
+
+			sem = nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE);
+			nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);
+
+			nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
+			nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
+		}
+
+		if (dev_priv->card_type == NV_50) {
+			if (status & 0x00000010) {
+				nv50_fb_vm_trap(dev, 1, "PFIFO_BAR_FAULT");
+				status &= ~0x00000010;
+				nv_wr32(dev, 0x002100, 0x00000010);
+			}
+		}
+
+		if (status) {
+			if (nouveau_ratelimit())
+				NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n",
+					status, chid);
+			nv_wr32(dev, NV03_PFIFO_INTR_0, status);
+			status = 0;
+		}
+
+		nv_wr32(dev, NV03_PFIFO_CACHES, reassign);
+	}
+
+	if (status) {
+		NV_INFO(dev, "PFIFO still angry after %d spins, halt\n", cnt);
+		nv_wr32(dev, 0x2140, 0);
+		nv_wr32(dev, 0x140, 0);
+	}
+
+	nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PFIFO_PENDING);
+}
diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
index c897342..af75015 100644
--- a/drivers/gpu/drm/nouveau/nv04_graph.c
+++ b/drivers/gpu/drm/nouveau/nv04_graph.c
@@ -26,6 +26,11 @@
 #include "drm.h"
 #include "nouveau_drm.h"
 #include "nouveau_drv.h"
+#include "nouveau_hw.h"
+#include "nouveau_util.h"
+
+static int  nv04_graph_register(struct drm_device *dev);
+static void nv04_graph_isr(struct drm_device *dev);
 
 static uint32_t nv04_graph_ctx_regs[] = {
 	0x0040053c,
@@ -357,10 +362,10 @@
 	if (chid >= dev_priv->engine.fifo.channels)
 		return NULL;
 
-	return dev_priv->fifos[chid];
+	return dev_priv->channels.ptr[chid];
 }
 
-void
+static void
 nv04_graph_context_switch(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -368,7 +373,6 @@
 	struct nouveau_channel *chan = NULL;
 	int chid;
 
-	pgraph->fifo_access(dev, false);
 	nouveau_wait_for_idle(dev);
 
 	/* If previous context is valid, we need to save it */
@@ -376,11 +380,9 @@
 
 	/* Load context for next channel */
 	chid = dev_priv->engine.fifo.channel_id(dev);
-	chan = dev_priv->fifos[chid];
+	chan = dev_priv->channels.ptr[chid];
 	if (chan)
 		nv04_graph_load_context(chan);
-
-	pgraph->fifo_access(dev, true);
 }
 
 static uint32_t *ctx_reg(struct graph_state *ctx, uint32_t reg)
@@ -412,10 +414,25 @@
 
 void nv04_graph_destroy_context(struct nouveau_channel *chan)
 {
+	struct drm_device *dev = chan->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
 	struct graph_state *pgraph_ctx = chan->pgraph_ctx;
+	unsigned long flags;
 
+	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+	pgraph->fifo_access(dev, false);
+
+	/* Unload the context if it's the currently active one */
+	if (pgraph->channel(dev) == chan)
+		pgraph->unload_context(dev);
+
+	/* Free the context resources */
 	kfree(pgraph_ctx);
 	chan->pgraph_ctx = NULL;
+
+	pgraph->fifo_access(dev, true);
+	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
 }
 
 int nv04_graph_load_context(struct nouveau_channel *chan)
@@ -468,13 +485,19 @@
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	uint32_t tmp;
+	int ret;
 
 	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
 			~NV_PMC_ENABLE_PGRAPH);
 	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
 			 NV_PMC_ENABLE_PGRAPH);
 
+	ret = nv04_graph_register(dev);
+	if (ret)
+		return ret;
+
 	/* Enable PGRAPH interrupts */
+	nouveau_irq_register(dev, 12, nv04_graph_isr);
 	nv_wr32(dev, NV03_PGRAPH_INTR, 0xFFFFFFFF);
 	nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
 
@@ -510,6 +533,8 @@
 
 void nv04_graph_takedown(struct drm_device *dev)
 {
+	nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000);
+	nouveau_irq_unregister(dev, 12);
 }
 
 void
@@ -524,13 +549,27 @@
 }
 
 static int
-nv04_graph_mthd_set_ref(struct nouveau_channel *chan, int grclass,
-			int mthd, uint32_t data)
+nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
+			u32 class, u32 mthd, u32 data)
 {
 	atomic_set(&chan->fence.last_sequence_irq, data);
 	return 0;
 }
 
+int
+nv04_graph_mthd_page_flip(struct nouveau_channel *chan,
+			  u32 class, u32 mthd, u32 data)
+{
+	struct drm_device *dev = chan->dev;
+	struct nouveau_page_flip_state s;
+
+	if (!nouveau_finish_page_flip(chan, &s))
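+	/* SW method: complete the pending flip, then point the CRTC at the
+	 * new framebuffer's scanout offset */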
+		nv_set_crtc_base(dev, s.crtc,
+				 s.offset + s.y * s.pitch + s.x * s.bpp / 8);
+
+	return 0;
+}
+
 /*
  * Software methods, why they are needed, and how they all work:
  *
@@ -606,12 +645,12 @@
  */
 
 static void
-nv04_graph_set_ctx1(struct nouveau_channel *chan, uint32_t mask, uint32_t value)
+nv04_graph_set_ctx1(struct nouveau_channel *chan, u32 mask, u32 value)
 {
 	struct drm_device *dev = chan->dev;
-	uint32_t instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4;
+	u32 instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4;
 	int subc = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 13) & 0x7;
-	uint32_t tmp;
+	u32 tmp;
 
 	tmp  = nv_ri32(dev, instance);
 	tmp &= ~mask;
@@ -623,11 +662,11 @@
 }
 
 static void
-nv04_graph_set_ctx_val(struct nouveau_channel *chan, uint32_t mask, uint32_t value)
+nv04_graph_set_ctx_val(struct nouveau_channel *chan, u32 mask, u32 value)
 {
 	struct drm_device *dev = chan->dev;
-	uint32_t instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4;
-	uint32_t tmp, ctx1;
+	u32 instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4;
+	u32 tmp, ctx1;
 	int class, op, valid = 1;
 
 	ctx1 = nv_ri32(dev, instance);
@@ -672,13 +711,13 @@
 }
 
 static int
-nv04_graph_mthd_set_operation(struct nouveau_channel *chan, int grclass,
-			      int mthd, uint32_t data)
+nv04_graph_mthd_set_operation(struct nouveau_channel *chan,
+			      u32 class, u32 mthd, u32 data)
 {
 	if (data > 5)
 		return 1;
 	/* Old versions of the objects only accept first three operations. */
-	if (data > 2 && grclass < 0x40)
+	if (data > 2 && class < 0x40)
 		return 1;
 	nv04_graph_set_ctx1(chan, 0x00038000, data << 15);
 	/* changing operation changes set of objects needed for validation */
@@ -687,8 +726,8 @@
 }
 
 static int
-nv04_graph_mthd_surf3d_clip_h(struct nouveau_channel *chan, int grclass,
-			      int mthd, uint32_t data)
+nv04_graph_mthd_surf3d_clip_h(struct nouveau_channel *chan,
+			      u32 class, u32 mthd, u32 data)
 {
 	uint32_t min = data & 0xffff, max;
 	uint32_t w = data >> 16;
@@ -706,8 +745,8 @@
 }
 
 static int
-nv04_graph_mthd_surf3d_clip_v(struct nouveau_channel *chan, int grclass,
-			      int mthd, uint32_t data)
+nv04_graph_mthd_surf3d_clip_v(struct nouveau_channel *chan,
+			      u32 class, u32 mthd, u32 data)
 {
 	uint32_t min = data & 0xffff, max;
 	uint32_t w = data >> 16;
@@ -725,8 +764,8 @@
 }
 
 static int
-nv04_graph_mthd_bind_surf2d(struct nouveau_channel *chan, int grclass,
-			      int mthd, uint32_t data)
+nv04_graph_mthd_bind_surf2d(struct nouveau_channel *chan,
+			    u32 class, u32 mthd, u32 data)
 {
 	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
 	case 0x30:
@@ -742,8 +781,8 @@
 }
 
 static int
-nv04_graph_mthd_bind_surf2d_swzsurf(struct nouveau_channel *chan, int grclass,
-			      int mthd, uint32_t data)
+nv04_graph_mthd_bind_surf2d_swzsurf(struct nouveau_channel *chan,
+				    u32 class, u32 mthd, u32 data)
 {
 	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
 	case 0x30:
@@ -763,8 +802,8 @@
 }
 
 static int
-nv04_graph_mthd_bind_nv01_patt(struct nouveau_channel *chan, int grclass,
-			      int mthd, uint32_t data)
+nv04_graph_mthd_bind_nv01_patt(struct nouveau_channel *chan,
+			       u32 class, u32 mthd, u32 data)
 {
 	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
 	case 0x30:
@@ -778,8 +817,8 @@
 }
 
 static int
-nv04_graph_mthd_bind_nv04_patt(struct nouveau_channel *chan, int grclass,
-			      int mthd, uint32_t data)
+nv04_graph_mthd_bind_nv04_patt(struct nouveau_channel *chan,
+			       u32 class, u32 mthd, u32 data)
 {
 	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
 	case 0x30:
@@ -793,8 +832,8 @@
 }
 
 static int
-nv04_graph_mthd_bind_rop(struct nouveau_channel *chan, int grclass,
-			      int mthd, uint32_t data)
+nv04_graph_mthd_bind_rop(struct nouveau_channel *chan,
+			 u32 class, u32 mthd, u32 data)
 {
 	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
 	case 0x30:
@@ -808,8 +847,8 @@
 }
 
 static int
-nv04_graph_mthd_bind_beta1(struct nouveau_channel *chan, int grclass,
-			      int mthd, uint32_t data)
+nv04_graph_mthd_bind_beta1(struct nouveau_channel *chan,
+			   u32 class, u32 mthd, u32 data)
 {
 	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
 	case 0x30:
@@ -823,8 +862,8 @@
 }
 
 static int
-nv04_graph_mthd_bind_beta4(struct nouveau_channel *chan, int grclass,
-			      int mthd, uint32_t data)
+nv04_graph_mthd_bind_beta4(struct nouveau_channel *chan,
+			   u32 class, u32 mthd, u32 data)
 {
 	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
 	case 0x30:
@@ -838,8 +877,8 @@
 }
 
 static int
-nv04_graph_mthd_bind_surf_dst(struct nouveau_channel *chan, int grclass,
-			      int mthd, uint32_t data)
+nv04_graph_mthd_bind_surf_dst(struct nouveau_channel *chan,
+			      u32 class, u32 mthd, u32 data)
 {
 	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
 	case 0x30:
@@ -853,8 +892,8 @@
 }
 
 static int
-nv04_graph_mthd_bind_surf_src(struct nouveau_channel *chan, int grclass,
-			      int mthd, uint32_t data)
+nv04_graph_mthd_bind_surf_src(struct nouveau_channel *chan,
+			      u32 class, u32 mthd, u32 data)
 {
 	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
 	case 0x30:
@@ -868,8 +907,8 @@
 }
 
 static int
-nv04_graph_mthd_bind_surf_color(struct nouveau_channel *chan, int grclass,
-			      int mthd, uint32_t data)
+nv04_graph_mthd_bind_surf_color(struct nouveau_channel *chan,
+				u32 class, u32 mthd, u32 data)
 {
 	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
 	case 0x30:
@@ -883,8 +922,8 @@
 }
 
 static int
-nv04_graph_mthd_bind_surf_zeta(struct nouveau_channel *chan, int grclass,
-			      int mthd, uint32_t data)
+nv04_graph_mthd_bind_surf_zeta(struct nouveau_channel *chan,
+			       u32 class, u32 mthd, u32 data)
 {
 	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
 	case 0x30:
@@ -898,8 +937,8 @@
 }
 
 static int
-nv04_graph_mthd_bind_clip(struct nouveau_channel *chan, int grclass,
-			      int mthd, uint32_t data)
+nv04_graph_mthd_bind_clip(struct nouveau_channel *chan,
+			  u32 class, u32 mthd, u32 data)
 {
 	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
 	case 0x30:
@@ -913,8 +952,8 @@
 }
 
 static int
-nv04_graph_mthd_bind_chroma(struct nouveau_channel *chan, int grclass,
-			      int mthd, uint32_t data)
+nv04_graph_mthd_bind_chroma(struct nouveau_channel *chan,
+			    u32 class, u32 mthd, u32 data)
 {
 	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
 	case 0x30:
@@ -930,194 +969,346 @@
 	return 1;
 }
 
-static struct nouveau_pgraph_object_method nv04_graph_mthds_sw[] = {
-	{ 0x0150, nv04_graph_mthd_set_ref },
+static int
+nv04_graph_register(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->engine.graph.registered)
+		return 0;
+
+	/* dvd subpicture */
+	NVOBJ_CLASS(dev, 0x0038, GR);
+
+	/* m2mf */
+	NVOBJ_CLASS(dev, 0x0039, GR);
+
+	/* nv03 gdirect */
+	NVOBJ_CLASS(dev, 0x004b, GR);
+	NVOBJ_MTHD (dev, 0x004b, 0x0184, nv04_graph_mthd_bind_nv01_patt);
+	NVOBJ_MTHD (dev, 0x004b, 0x0188, nv04_graph_mthd_bind_rop);
+	NVOBJ_MTHD (dev, 0x004b, 0x018c, nv04_graph_mthd_bind_beta1);
+	NVOBJ_MTHD (dev, 0x004b, 0x0190, nv04_graph_mthd_bind_surf_dst);
+	NVOBJ_MTHD (dev, 0x004b, 0x02fc, nv04_graph_mthd_set_operation);
+
+	/* nv04 gdirect */
+	NVOBJ_CLASS(dev, 0x004a, GR);
+	NVOBJ_MTHD (dev, 0x004a, 0x0188, nv04_graph_mthd_bind_nv04_patt);
+	NVOBJ_MTHD (dev, 0x004a, 0x018c, nv04_graph_mthd_bind_rop);
+	NVOBJ_MTHD (dev, 0x004a, 0x0190, nv04_graph_mthd_bind_beta1);
+	NVOBJ_MTHD (dev, 0x004a, 0x0194, nv04_graph_mthd_bind_beta4);
+	NVOBJ_MTHD (dev, 0x004a, 0x0198, nv04_graph_mthd_bind_surf2d);
+	NVOBJ_MTHD (dev, 0x004a, 0x02fc, nv04_graph_mthd_set_operation);
+
+	/* nv01 imageblit */
+	NVOBJ_CLASS(dev, 0x001f, GR);
+	NVOBJ_MTHD (dev, 0x001f, 0x0184, nv04_graph_mthd_bind_chroma);
+	NVOBJ_MTHD (dev, 0x001f, 0x0188, nv04_graph_mthd_bind_clip);
+	NVOBJ_MTHD (dev, 0x001f, 0x018c, nv04_graph_mthd_bind_nv01_patt);
+	NVOBJ_MTHD (dev, 0x001f, 0x0190, nv04_graph_mthd_bind_rop);
+	NVOBJ_MTHD (dev, 0x001f, 0x0194, nv04_graph_mthd_bind_beta1);
+	NVOBJ_MTHD (dev, 0x001f, 0x0198, nv04_graph_mthd_bind_surf_dst);
+	NVOBJ_MTHD (dev, 0x001f, 0x019c, nv04_graph_mthd_bind_surf_src);
+	NVOBJ_MTHD (dev, 0x001f, 0x02fc, nv04_graph_mthd_set_operation);
+
+	/* nv04 imageblit */
+	NVOBJ_CLASS(dev, 0x005f, GR);
+	NVOBJ_MTHD (dev, 0x005f, 0x0184, nv04_graph_mthd_bind_chroma);
+	NVOBJ_MTHD (dev, 0x005f, 0x0188, nv04_graph_mthd_bind_clip);
+	NVOBJ_MTHD (dev, 0x005f, 0x018c, nv04_graph_mthd_bind_nv04_patt);
+	NVOBJ_MTHD (dev, 0x005f, 0x0190, nv04_graph_mthd_bind_rop);
+	NVOBJ_MTHD (dev, 0x005f, 0x0194, nv04_graph_mthd_bind_beta1);
+	NVOBJ_MTHD (dev, 0x005f, 0x0198, nv04_graph_mthd_bind_beta4);
+	NVOBJ_MTHD (dev, 0x005f, 0x019c, nv04_graph_mthd_bind_surf2d);
+	NVOBJ_MTHD (dev, 0x005f, 0x02fc, nv04_graph_mthd_set_operation);
+
+	/* nv04 iifc */
+	NVOBJ_CLASS(dev, 0x0060, GR);
+	NVOBJ_MTHD (dev, 0x0060, 0x0188, nv04_graph_mthd_bind_chroma);
+	NVOBJ_MTHD (dev, 0x0060, 0x018c, nv04_graph_mthd_bind_clip);
+	NVOBJ_MTHD (dev, 0x0060, 0x0190, nv04_graph_mthd_bind_nv04_patt);
+	NVOBJ_MTHD (dev, 0x0060, 0x0194, nv04_graph_mthd_bind_rop);
+	NVOBJ_MTHD (dev, 0x0060, 0x0198, nv04_graph_mthd_bind_beta1);
+	NVOBJ_MTHD (dev, 0x0060, 0x019c, nv04_graph_mthd_bind_beta4);
+	NVOBJ_MTHD (dev, 0x0060, 0x01a0, nv04_graph_mthd_bind_surf2d_swzsurf);
+	NVOBJ_MTHD (dev, 0x0060, 0x03e4, nv04_graph_mthd_set_operation);
+
+	/* nv05 iifc */
+	NVOBJ_CLASS(dev, 0x0064, GR);
+
+	/* nv01 ifc */
+	NVOBJ_CLASS(dev, 0x0021, GR);
+	NVOBJ_MTHD (dev, 0x0021, 0x0184, nv04_graph_mthd_bind_chroma);
+	NVOBJ_MTHD (dev, 0x0021, 0x0188, nv04_graph_mthd_bind_clip);
+	NVOBJ_MTHD (dev, 0x0021, 0x018c, nv04_graph_mthd_bind_nv01_patt);
+	NVOBJ_MTHD (dev, 0x0021, 0x0190, nv04_graph_mthd_bind_rop);
+	NVOBJ_MTHD (dev, 0x0021, 0x0194, nv04_graph_mthd_bind_beta1);
+	NVOBJ_MTHD (dev, 0x0021, 0x0198, nv04_graph_mthd_bind_surf_dst);
+	NVOBJ_MTHD (dev, 0x0021, 0x02fc, nv04_graph_mthd_set_operation);
+
+	/* nv04 ifc */
+	NVOBJ_CLASS(dev, 0x0061, GR);
+	NVOBJ_MTHD (dev, 0x0061, 0x0184, nv04_graph_mthd_bind_chroma);
+	NVOBJ_MTHD (dev, 0x0061, 0x0188, nv04_graph_mthd_bind_clip);
+	NVOBJ_MTHD (dev, 0x0061, 0x018c, nv04_graph_mthd_bind_nv04_patt);
+	NVOBJ_MTHD (dev, 0x0061, 0x0190, nv04_graph_mthd_bind_rop);
+	NVOBJ_MTHD (dev, 0x0061, 0x0194, nv04_graph_mthd_bind_beta1);
+	NVOBJ_MTHD (dev, 0x0061, 0x0198, nv04_graph_mthd_bind_beta4);
+	NVOBJ_MTHD (dev, 0x0061, 0x019c, nv04_graph_mthd_bind_surf2d);
+	NVOBJ_MTHD (dev, 0x0061, 0x02fc, nv04_graph_mthd_set_operation);
+
+	/* nv05 ifc */
+	NVOBJ_CLASS(dev, 0x0065, GR);
+
+	/* nv03 sifc */
+	NVOBJ_CLASS(dev, 0x0036, GR);
+	NVOBJ_MTHD (dev, 0x0036, 0x0184, nv04_graph_mthd_bind_chroma);
+	NVOBJ_MTHD (dev, 0x0036, 0x0188, nv04_graph_mthd_bind_nv01_patt);
+	NVOBJ_MTHD (dev, 0x0036, 0x018c, nv04_graph_mthd_bind_rop);
+	NVOBJ_MTHD (dev, 0x0036, 0x0190, nv04_graph_mthd_bind_beta1);
+	NVOBJ_MTHD (dev, 0x0036, 0x0194, nv04_graph_mthd_bind_surf_dst);
+	NVOBJ_MTHD (dev, 0x0036, 0x02fc, nv04_graph_mthd_set_operation);
+
+	/* nv04 sifc */
+	NVOBJ_CLASS(dev, 0x0076, GR);
+	NVOBJ_MTHD (dev, 0x0076, 0x0184, nv04_graph_mthd_bind_chroma);
+	NVOBJ_MTHD (dev, 0x0076, 0x0188, nv04_graph_mthd_bind_nv04_patt);
+	NVOBJ_MTHD (dev, 0x0076, 0x018c, nv04_graph_mthd_bind_rop);
+	NVOBJ_MTHD (dev, 0x0076, 0x0190, nv04_graph_mthd_bind_beta1);
+	NVOBJ_MTHD (dev, 0x0076, 0x0194, nv04_graph_mthd_bind_beta4);
+	NVOBJ_MTHD (dev, 0x0076, 0x0198, nv04_graph_mthd_bind_surf2d);
+	NVOBJ_MTHD (dev, 0x0076, 0x02fc, nv04_graph_mthd_set_operation);
+
+	/* nv05 sifc */
+	NVOBJ_CLASS(dev, 0x0066, GR);
+
+	/* nv03 sifm */
+	NVOBJ_CLASS(dev, 0x0037, GR);
+	NVOBJ_MTHD (dev, 0x0037, 0x0188, nv04_graph_mthd_bind_nv01_patt);
+	NVOBJ_MTHD (dev, 0x0037, 0x018c, nv04_graph_mthd_bind_rop);
+	NVOBJ_MTHD (dev, 0x0037, 0x0190, nv04_graph_mthd_bind_beta1);
+	NVOBJ_MTHD (dev, 0x0037, 0x0194, nv04_graph_mthd_bind_surf_dst);
+	NVOBJ_MTHD (dev, 0x0037, 0x0304, nv04_graph_mthd_set_operation);
+
+	/* nv04 sifm */
+	NVOBJ_CLASS(dev, 0x0077, GR);
+	NVOBJ_MTHD (dev, 0x0077, 0x0188, nv04_graph_mthd_bind_nv04_patt);
+	NVOBJ_MTHD (dev, 0x0077, 0x018c, nv04_graph_mthd_bind_rop);
+	NVOBJ_MTHD (dev, 0x0077, 0x0190, nv04_graph_mthd_bind_beta1);
+	NVOBJ_MTHD (dev, 0x0077, 0x0194, nv04_graph_mthd_bind_beta4);
+	NVOBJ_MTHD (dev, 0x0077, 0x0198, nv04_graph_mthd_bind_surf2d_swzsurf);
+	NVOBJ_MTHD (dev, 0x0077, 0x0304, nv04_graph_mthd_set_operation);
+
+	/* null */
+	NVOBJ_CLASS(dev, 0x0030, GR);
+
+	/* surf2d */
+	NVOBJ_CLASS(dev, 0x0042, GR);
+
+	/* rop */
+	NVOBJ_CLASS(dev, 0x0043, GR);
+
+	/* beta1 */
+	NVOBJ_CLASS(dev, 0x0012, GR);
+
+	/* beta4 */
+	NVOBJ_CLASS(dev, 0x0072, GR);
+
+	/* cliprect */
+	NVOBJ_CLASS(dev, 0x0019, GR);
+
+	/* nv01 pattern */
+	NVOBJ_CLASS(dev, 0x0018, GR);
+
+	/* nv04 pattern */
+	NVOBJ_CLASS(dev, 0x0044, GR);
+
+	/* swzsurf */
+	NVOBJ_CLASS(dev, 0x0052, GR);
+
+	/* surf3d */
+	NVOBJ_CLASS(dev, 0x0053, GR);
+	NVOBJ_MTHD (dev, 0x0053, 0x02f8, nv04_graph_mthd_surf3d_clip_h);
+	NVOBJ_MTHD (dev, 0x0053, 0x02fc, nv04_graph_mthd_surf3d_clip_v);
+
+	/* nv03 tex_tri */
+	NVOBJ_CLASS(dev, 0x0048, GR);
+	NVOBJ_MTHD (dev, 0x0048, 0x0188, nv04_graph_mthd_bind_clip);
+	NVOBJ_MTHD (dev, 0x0048, 0x018c, nv04_graph_mthd_bind_surf_color);
+	NVOBJ_MTHD (dev, 0x0048, 0x0190, nv04_graph_mthd_bind_surf_zeta);
+
+	/* tex_tri */
+	NVOBJ_CLASS(dev, 0x0054, GR);
+
+	/* multitex_tri */
+	NVOBJ_CLASS(dev, 0x0055, GR);
+
+	/* nv01 chroma */
+	NVOBJ_CLASS(dev, 0x0017, GR);
+
+	/* nv04 chroma */
+	NVOBJ_CLASS(dev, 0x0057, GR);
+
+	/* surf_dst */
+	NVOBJ_CLASS(dev, 0x0058, GR);
+
+	/* surf_src */
+	NVOBJ_CLASS(dev, 0x0059, GR);
+
+	/* surf_color */
+	NVOBJ_CLASS(dev, 0x005a, GR);
+
+	/* surf_zeta */
+	NVOBJ_CLASS(dev, 0x005b, GR);
+
+	/* nv01 line */
+	NVOBJ_CLASS(dev, 0x001c, GR);
+	NVOBJ_MTHD (dev, 0x001c, 0x0184, nv04_graph_mthd_bind_clip);
+	NVOBJ_MTHD (dev, 0x001c, 0x0188, nv04_graph_mthd_bind_nv01_patt);
+	NVOBJ_MTHD (dev, 0x001c, 0x018c, nv04_graph_mthd_bind_rop);
+	NVOBJ_MTHD (dev, 0x001c, 0x0190, nv04_graph_mthd_bind_beta1);
+	NVOBJ_MTHD (dev, 0x001c, 0x0194, nv04_graph_mthd_bind_surf_dst);
+	NVOBJ_MTHD (dev, 0x001c, 0x02fc, nv04_graph_mthd_set_operation);
+
+	/* nv04 line */
+	NVOBJ_CLASS(dev, 0x005c, GR);
+	NVOBJ_MTHD (dev, 0x005c, 0x0184, nv04_graph_mthd_bind_clip);
+	NVOBJ_MTHD (dev, 0x005c, 0x0188, nv04_graph_mthd_bind_nv04_patt);
+	NVOBJ_MTHD (dev, 0x005c, 0x018c, nv04_graph_mthd_bind_rop);
+	NVOBJ_MTHD (dev, 0x005c, 0x0190, nv04_graph_mthd_bind_beta1);
+	NVOBJ_MTHD (dev, 0x005c, 0x0194, nv04_graph_mthd_bind_beta4);
+	NVOBJ_MTHD (dev, 0x005c, 0x0198, nv04_graph_mthd_bind_surf2d);
+	NVOBJ_MTHD (dev, 0x005c, 0x02fc, nv04_graph_mthd_set_operation);
+
+	/* nv01 tri */
+	NVOBJ_CLASS(dev, 0x001d, GR);
+	NVOBJ_MTHD (dev, 0x001d, 0x0184, nv04_graph_mthd_bind_clip);
+	NVOBJ_MTHD (dev, 0x001d, 0x0188, nv04_graph_mthd_bind_nv01_patt);
+	NVOBJ_MTHD (dev, 0x001d, 0x018c, nv04_graph_mthd_bind_rop);
+	NVOBJ_MTHD (dev, 0x001d, 0x0190, nv04_graph_mthd_bind_beta1);
+	NVOBJ_MTHD (dev, 0x001d, 0x0194, nv04_graph_mthd_bind_surf_dst);
+	NVOBJ_MTHD (dev, 0x001d, 0x02fc, nv04_graph_mthd_set_operation);
+
+	/* nv04 tri */
+	NVOBJ_CLASS(dev, 0x005d, GR);
+	NVOBJ_MTHD (dev, 0x005d, 0x0184, nv04_graph_mthd_bind_clip);
+	NVOBJ_MTHD (dev, 0x005d, 0x0188, nv04_graph_mthd_bind_nv04_patt);
+	NVOBJ_MTHD (dev, 0x005d, 0x018c, nv04_graph_mthd_bind_rop);
+	NVOBJ_MTHD (dev, 0x005d, 0x0190, nv04_graph_mthd_bind_beta1);
+	NVOBJ_MTHD (dev, 0x005d, 0x0194, nv04_graph_mthd_bind_beta4);
+	NVOBJ_MTHD (dev, 0x005d, 0x0198, nv04_graph_mthd_bind_surf2d);
+	NVOBJ_MTHD (dev, 0x005d, 0x02fc, nv04_graph_mthd_set_operation);
+
+	/* nv01 rect */
+	NVOBJ_CLASS(dev, 0x001e, GR);
+	NVOBJ_MTHD (dev, 0x001e, 0x0184, nv04_graph_mthd_bind_clip);
+	NVOBJ_MTHD (dev, 0x001e, 0x0188, nv04_graph_mthd_bind_nv01_patt);
+	NVOBJ_MTHD (dev, 0x001e, 0x018c, nv04_graph_mthd_bind_rop);
+	NVOBJ_MTHD (dev, 0x001e, 0x0190, nv04_graph_mthd_bind_beta1);
+	NVOBJ_MTHD (dev, 0x001e, 0x0194, nv04_graph_mthd_bind_surf_dst);
+	NVOBJ_MTHD (dev, 0x001e, 0x02fc, nv04_graph_mthd_set_operation);
+
+	/* nv04 rect */
+	NVOBJ_CLASS(dev, 0x005e, GR);
+	NVOBJ_MTHD (dev, 0x005e, 0x0184, nv04_graph_mthd_bind_clip);
+	NVOBJ_MTHD (dev, 0x005e, 0x0188, nv04_graph_mthd_bind_nv04_patt);
+	NVOBJ_MTHD (dev, 0x005e, 0x018c, nv04_graph_mthd_bind_rop);
+	NVOBJ_MTHD (dev, 0x005e, 0x0190, nv04_graph_mthd_bind_beta1);
+	NVOBJ_MTHD (dev, 0x005e, 0x0194, nv04_graph_mthd_bind_beta4);
+	NVOBJ_MTHD (dev, 0x005e, 0x0198, nv04_graph_mthd_bind_surf2d);
+	NVOBJ_MTHD (dev, 0x005e, 0x02fc, nv04_graph_mthd_set_operation);
+
+	/* nvsw */
+	NVOBJ_CLASS(dev, 0x506e, SW);
+	NVOBJ_MTHD (dev, 0x506e, 0x0150, nv04_graph_mthd_set_ref);
+	NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
+
+	dev_priv->engine.graph.registered = true;
+	return 0;
+}
+
+static struct nouveau_bitfield nv04_graph_intr[] = {
+	{ NV_PGRAPH_INTR_NOTIFY, "NOTIFY" },
 	{}
 };
 
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_gdirect[] = {
-	{ 0x0184, nv04_graph_mthd_bind_nv01_patt },
-	{ 0x0188, nv04_graph_mthd_bind_rop },
-	{ 0x018c, nv04_graph_mthd_bind_beta1 },
-	{ 0x0190, nv04_graph_mthd_bind_surf_dst },
-	{ 0x02fc, nv04_graph_mthd_set_operation },
-	{},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_gdirect[] = {
-	{ 0x0188, nv04_graph_mthd_bind_nv04_patt },
-	{ 0x018c, nv04_graph_mthd_bind_rop },
-	{ 0x0190, nv04_graph_mthd_bind_beta1 },
-	{ 0x0194, nv04_graph_mthd_bind_beta4 },
-	{ 0x0198, nv04_graph_mthd_bind_surf2d },
-	{ 0x02fc, nv04_graph_mthd_set_operation },
-	{},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv01_imageblit[] = {
-	{ 0x0184, nv04_graph_mthd_bind_chroma },
-	{ 0x0188, nv04_graph_mthd_bind_clip },
-	{ 0x018c, nv04_graph_mthd_bind_nv01_patt },
-	{ 0x0190, nv04_graph_mthd_bind_rop },
-	{ 0x0194, nv04_graph_mthd_bind_beta1 },
-	{ 0x0198, nv04_graph_mthd_bind_surf_dst },
-	{ 0x019c, nv04_graph_mthd_bind_surf_src },
-	{ 0x02fc, nv04_graph_mthd_set_operation },
-	{},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_imageblit_ifc[] = {
-	{ 0x0184, nv04_graph_mthd_bind_chroma },
-	{ 0x0188, nv04_graph_mthd_bind_clip },
-	{ 0x018c, nv04_graph_mthd_bind_nv04_patt },
-	{ 0x0190, nv04_graph_mthd_bind_rop },
-	{ 0x0194, nv04_graph_mthd_bind_beta1 },
-	{ 0x0198, nv04_graph_mthd_bind_beta4 },
-	{ 0x019c, nv04_graph_mthd_bind_surf2d },
-	{ 0x02fc, nv04_graph_mthd_set_operation },
-	{},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_iifc[] = {
-	{ 0x0188, nv04_graph_mthd_bind_chroma },
-	{ 0x018c, nv04_graph_mthd_bind_clip },
-	{ 0x0190, nv04_graph_mthd_bind_nv04_patt },
-	{ 0x0194, nv04_graph_mthd_bind_rop },
-	{ 0x0198, nv04_graph_mthd_bind_beta1 },
-	{ 0x019c, nv04_graph_mthd_bind_beta4 },
-	{ 0x01a0, nv04_graph_mthd_bind_surf2d_swzsurf },
-	{ 0x03e4, nv04_graph_mthd_set_operation },
-	{},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv01_ifc[] = {
-	{ 0x0184, nv04_graph_mthd_bind_chroma },
-	{ 0x0188, nv04_graph_mthd_bind_clip },
-	{ 0x018c, nv04_graph_mthd_bind_nv01_patt },
-	{ 0x0190, nv04_graph_mthd_bind_rop },
-	{ 0x0194, nv04_graph_mthd_bind_beta1 },
-	{ 0x0198, nv04_graph_mthd_bind_surf_dst },
-	{ 0x02fc, nv04_graph_mthd_set_operation },
-	{},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_sifc[] = {
-	{ 0x0184, nv04_graph_mthd_bind_chroma },
-	{ 0x0188, nv04_graph_mthd_bind_nv01_patt },
-	{ 0x018c, nv04_graph_mthd_bind_rop },
-	{ 0x0190, nv04_graph_mthd_bind_beta1 },
-	{ 0x0194, nv04_graph_mthd_bind_surf_dst },
-	{ 0x02fc, nv04_graph_mthd_set_operation },
-	{},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_sifc[] = {
-	{ 0x0184, nv04_graph_mthd_bind_chroma },
-	{ 0x0188, nv04_graph_mthd_bind_nv04_patt },
-	{ 0x018c, nv04_graph_mthd_bind_rop },
-	{ 0x0190, nv04_graph_mthd_bind_beta1 },
-	{ 0x0194, nv04_graph_mthd_bind_beta4 },
-	{ 0x0198, nv04_graph_mthd_bind_surf2d },
-	{ 0x02fc, nv04_graph_mthd_set_operation },
-	{},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_sifm[] = {
-	{ 0x0188, nv04_graph_mthd_bind_nv01_patt },
-	{ 0x018c, nv04_graph_mthd_bind_rop },
-	{ 0x0190, nv04_graph_mthd_bind_beta1 },
-	{ 0x0194, nv04_graph_mthd_bind_surf_dst },
-	{ 0x0304, nv04_graph_mthd_set_operation },
-	{},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_sifm[] = {
-	{ 0x0188, nv04_graph_mthd_bind_nv04_patt },
-	{ 0x018c, nv04_graph_mthd_bind_rop },
-	{ 0x0190, nv04_graph_mthd_bind_beta1 },
-	{ 0x0194, nv04_graph_mthd_bind_beta4 },
-	{ 0x0198, nv04_graph_mthd_bind_surf2d_swzsurf },
-	{ 0x0304, nv04_graph_mthd_set_operation },
-	{},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv01_shape[] = {
-	{ 0x0184, nv04_graph_mthd_bind_clip },
-	{ 0x0188, nv04_graph_mthd_bind_nv01_patt },
-	{ 0x018c, nv04_graph_mthd_bind_rop },
-	{ 0x0190, nv04_graph_mthd_bind_beta1 },
-	{ 0x0194, nv04_graph_mthd_bind_surf_dst },
-	{ 0x02fc, nv04_graph_mthd_set_operation },
-	{},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_shape[] = {
-	{ 0x0184, nv04_graph_mthd_bind_clip },
-	{ 0x0188, nv04_graph_mthd_bind_nv04_patt },
-	{ 0x018c, nv04_graph_mthd_bind_rop },
-	{ 0x0190, nv04_graph_mthd_bind_beta1 },
-	{ 0x0194, nv04_graph_mthd_bind_beta4 },
-	{ 0x0198, nv04_graph_mthd_bind_surf2d },
-	{ 0x02fc, nv04_graph_mthd_set_operation },
-	{},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_tex_tri[] = {
-	{ 0x0188, nv04_graph_mthd_bind_clip },
-	{ 0x018c, nv04_graph_mthd_bind_surf_color },
-	{ 0x0190, nv04_graph_mthd_bind_surf_zeta },
-	{},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_surf3d[] = {
-	{ 0x02f8, nv04_graph_mthd_surf3d_clip_h },
-	{ 0x02fc, nv04_graph_mthd_surf3d_clip_v },
-	{},
-};
-
-struct nouveau_pgraph_object_class nv04_graph_grclass[] = {
-	{ 0x0038, false, NULL }, /* dvd subpicture */
-	{ 0x0039, false, NULL }, /* m2mf */
-	{ 0x004b, false, nv04_graph_mthds_nv03_gdirect }, /* nv03 gdirect */
-	{ 0x004a, false, nv04_graph_mthds_nv04_gdirect }, /* nv04 gdirect */
-	{ 0x001f, false, nv04_graph_mthds_nv01_imageblit }, /* nv01 imageblit */
-	{ 0x005f, false, nv04_graph_mthds_nv04_imageblit_ifc }, /* nv04 imageblit */
-	{ 0x0060, false, nv04_graph_mthds_nv04_iifc }, /* nv04 iifc */
-	{ 0x0064, false, NULL }, /* nv05 iifc */
-	{ 0x0021, false, nv04_graph_mthds_nv01_ifc }, /* nv01 ifc */
-	{ 0x0061, false, nv04_graph_mthds_nv04_imageblit_ifc }, /* nv04 ifc */
-	{ 0x0065, false, NULL }, /* nv05 ifc */
-	{ 0x0036, false, nv04_graph_mthds_nv03_sifc }, /* nv03 sifc */
-	{ 0x0076, false, nv04_graph_mthds_nv04_sifc }, /* nv04 sifc */
-	{ 0x0066, false, NULL }, /* nv05 sifc */
-	{ 0x0037, false, nv04_graph_mthds_nv03_sifm }, /* nv03 sifm */
-	{ 0x0077, false, nv04_graph_mthds_nv04_sifm }, /* nv04 sifm */
-	{ 0x0030, false, NULL }, /* null */
-	{ 0x0042, false, NULL }, /* surf2d */
-	{ 0x0043, false, NULL }, /* rop */
-	{ 0x0012, false, NULL }, /* beta1 */
-	{ 0x0072, false, NULL }, /* beta4 */
-	{ 0x0019, false, NULL }, /* cliprect */
-	{ 0x0018, false, NULL }, /* nv01 pattern */
-	{ 0x0044, false, NULL }, /* nv04 pattern */
-	{ 0x0052, false, NULL }, /* swzsurf */
-	{ 0x0053, false, nv04_graph_mthds_surf3d }, /* surf3d */
-	{ 0x0048, false, nv04_graph_mthds_nv03_tex_tri }, /* nv03 tex_tri */
-	{ 0x0054, false, NULL }, /* tex_tri */
-	{ 0x0055, false, NULL }, /* multitex_tri */
-	{ 0x0017, false, NULL }, /* nv01 chroma */
-	{ 0x0057, false, NULL }, /* nv04 chroma */
-	{ 0x0058, false, NULL }, /* surf_dst */
-	{ 0x0059, false, NULL }, /* surf_src */
-	{ 0x005a, false, NULL }, /* surf_color */
-	{ 0x005b, false, NULL }, /* surf_zeta */
-	{ 0x001c, false, nv04_graph_mthds_nv01_shape }, /* nv01 line */
-	{ 0x005c, false, nv04_graph_mthds_nv04_shape }, /* nv04 line */
-	{ 0x001d, false, nv04_graph_mthds_nv01_shape }, /* nv01 tri */
-	{ 0x005d, false, nv04_graph_mthds_nv04_shape }, /* nv04 tri */
-	{ 0x001e, false, nv04_graph_mthds_nv01_shape }, /* nv01 rect */
-	{ 0x005e, false, nv04_graph_mthds_nv04_shape }, /* nv04 rect */
-	{ 0x506e, true, nv04_graph_mthds_sw },
+static struct nouveau_bitfield nv04_graph_nstatus[] =
+{
+	{ NV04_PGRAPH_NSTATUS_STATE_IN_USE,       "STATE_IN_USE" },
+	{ NV04_PGRAPH_NSTATUS_INVALID_STATE,      "INVALID_STATE" },
+	{ NV04_PGRAPH_NSTATUS_BAD_ARGUMENT,       "BAD_ARGUMENT" },
+	{ NV04_PGRAPH_NSTATUS_PROTECTION_FAULT,   "PROTECTION_FAULT" },
 	{}
 };
 
+struct nouveau_bitfield nv04_graph_nsource[] =
+{
+	{ NV03_PGRAPH_NSOURCE_NOTIFICATION,       "NOTIFICATION" },
+	{ NV03_PGRAPH_NSOURCE_DATA_ERROR,         "DATA_ERROR" },
+	{ NV03_PGRAPH_NSOURCE_PROTECTION_ERROR,   "PROTECTION_ERROR" },
+	{ NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION,    "RANGE_EXCEPTION" },
+	{ NV03_PGRAPH_NSOURCE_LIMIT_COLOR,        "LIMIT_COLOR" },
+	{ NV03_PGRAPH_NSOURCE_LIMIT_ZETA,         "LIMIT_ZETA" },
+	{ NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD,       "ILLEGAL_MTHD" },
+	{ NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION,   "DMA_R_PROTECTION" },
+	{ NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION,   "DMA_W_PROTECTION" },
+	{ NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION,   "FORMAT_EXCEPTION" },
+	{ NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION,    "PATCH_EXCEPTION" },
+	{ NV03_PGRAPH_NSOURCE_STATE_INVALID,      "STATE_INVALID" },
+	{ NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY,      "DOUBLE_NOTIFY" },
+	{ NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE,      "NOTIFY_IN_USE" },
+	{ NV03_PGRAPH_NSOURCE_METHOD_CNT,         "METHOD_CNT" },
+	{ NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION,   "BFR_NOTIFICATION" },
+	{ NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION, "DMA_VTX_PROTECTION" },
+	{ NV03_PGRAPH_NSOURCE_DMA_WIDTH_A,        "DMA_WIDTH_A" },
+	{ NV03_PGRAPH_NSOURCE_DMA_WIDTH_B,        "DMA_WIDTH_B" },
+	{}
+};
+
+static void
+nv04_graph_isr(struct drm_device *dev)
+{
+	u32 stat;
+
+	while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) {
+		u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
+		u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
+		u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
+		u32 chid = (addr & 0x0f000000) >> 24;
+		u32 subc = (addr & 0x0000e000) >> 13;
+		u32 mthd = (addr & 0x00001ffc);
+		u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
+		u32 class = nv_rd32(dev, 0x400180 + subc * 4) & 0xff;
+		u32 show = stat;
+
+		if (stat & NV_PGRAPH_INTR_NOTIFY) {
+			if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
+				if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data))
+					show &= ~NV_PGRAPH_INTR_NOTIFY;
+			}
+		}
+
+		if (stat & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
+			nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
+			stat &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
+			show &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
+			nv04_graph_context_switch(dev);
+		}
+
+		nv_wr32(dev, NV03_PGRAPH_INTR, stat);
+		nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001);
+
+		if (show && nouveau_ratelimit()) {
+			NV_INFO(dev, "PGRAPH -");
+			nouveau_bitfield_print(nv04_graph_intr, show);
+			printk(" nsource:");
+			nouveau_bitfield_print(nv04_graph_nsource, nsource);
+			printk(" nstatus:");
+			nouveau_bitfield_print(nv04_graph_nstatus, nstatus);
+			printk("\n");
+			NV_INFO(dev, "PGRAPH - ch %d/%d class 0x%04x "
+				     "mthd 0x%04x data 0x%08x\n",
+				chid, subc, class, mthd, data);
+		}
+	}
+}
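
The new nv04_graph_isr() above reports unhandled interrupts by walking nouveau_bitfield tables instead of open-coding the register decode. As a rough sketch of what a decoder like nouveau_bitfield_print() (implemented in the new nouveau_util.c, not shown here) needs to do, assuming only the { value, name } table layout visible above:

	struct bitfield_sketch {
		u32 mask;
		const char *name;
	};

	/* Print the name of every field set in "value"; tables end with {}. */
	static void
	bitfield_print_sketch(const struct bitfield_sketch *bf, u32 value)
	{
		for (; bf->name; bf++) {
			if (value & bf->mask) {
				printk(" %s", bf->name);
				value &= ~bf->mask;
			}
		}
		if (value)
			printk(" (unknown bits 0x%08x)", value);
	}
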
diff --git a/drivers/gpu/drm/nouveau/nv04_instmem.c b/drivers/gpu/drm/nouveau/nv04_instmem.c
index 0b5ae29..b8e3edb 100644
--- a/drivers/gpu/drm/nouveau/nv04_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv04_instmem.c
@@ -98,35 +98,6 @@
 }
 
 int
-nv04_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj,
-		      uint32_t *sz)
-{
-	return 0;
-}
-
-void
-nv04_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
-{
-}
-
-int
-nv04_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
-{
-	return 0;
-}
-
-int
-nv04_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
-{
-	return 0;
-}
-
-void
-nv04_instmem_flush(struct drm_device *dev)
-{
-}
-
-int
 nv04_instmem_suspend(struct drm_device *dev)
 {
 	return 0;
@@ -137,3 +108,56 @@
 {
 }
 
+int
+nv04_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align)
+{
+	struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
+	struct drm_mm_node *ramin = NULL;
+
+	do {
+		if (drm_mm_pre_get(&dev_priv->ramin_heap))
+			return -ENOMEM;
+
+		spin_lock(&dev_priv->ramin_lock);
+		ramin = drm_mm_search_free(&dev_priv->ramin_heap, size, align, 0);
+		if (ramin == NULL) {
+			spin_unlock(&dev_priv->ramin_lock);
+			return -ENOMEM;
+		}
+
+		ramin = drm_mm_get_block_atomic(ramin, size, align);
+		spin_unlock(&dev_priv->ramin_lock);
+	} while (ramin == NULL);
+
+	gpuobj->node  = ramin;
+	gpuobj->vinst = ramin->start;
+	return 0;
+}
+
+void
+nv04_instmem_put(struct nouveau_gpuobj *gpuobj)
+{
+	struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
+
+	spin_lock(&dev_priv->ramin_lock);
+	drm_mm_put_block(gpuobj->node);
+	gpuobj->node = NULL;
+	spin_unlock(&dev_priv->ramin_lock);
+}
+
+int
+nv04_instmem_map(struct nouveau_gpuobj *gpuobj)
+{
+	gpuobj->pinst = gpuobj->vinst;
+	return 0;
+}
+
+void
+nv04_instmem_unmap(struct nouveau_gpuobj *gpuobj)
+{
+}
+
+void
+nv04_instmem_flush(struct drm_device *dev)
+{
+}
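
nv04_instmem_get() above follows the usual drm_mm idiom for allocating while holding a spinlock: refill the allocator's node cache outside the lock, then search for and claim the block atomically inside it, retrying only when a racing thread consumed the preallocated nodes. Isolated as a sketch, using only the drm_mm calls that appear above:

	static struct drm_mm_node *
	mm_alloc_sketch(struct drm_mm *mm, spinlock_t *lock, u32 size, u32 align)
	{
		struct drm_mm_node *node;

		do {
			/* may sleep: refill drm_mm's internal node cache */
			if (drm_mm_pre_get(mm))
				return NULL;		/* out of memory */

			spin_lock(lock);
			node = drm_mm_search_free(mm, size, align, 0);
			if (!node) {
				spin_unlock(lock);
				return NULL;		/* heap is full */
			}
			/* fails only if the cached nodes were raced away */
			node = drm_mm_get_block_atomic(node, size, align);
			spin_unlock(lock);
		} while (!node);

		return node;
	}
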
diff --git a/drivers/gpu/drm/nouveau/nv10_fb.c b/drivers/gpu/drm/nouveau/nv10_fb.c
index cc5cda4..f78181a 100644
--- a/drivers/gpu/drm/nouveau/nv10_fb.c
+++ b/drivers/gpu/drm/nouveau/nv10_fb.c
@@ -3,23 +3,109 @@
 #include "nouveau_drv.h"
 #include "nouveau_drm.h"
 
-void
-nv10_fb_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
-			  uint32_t size, uint32_t pitch)
+static struct drm_mm_node *
+nv20_fb_alloc_tag(struct drm_device *dev, uint32_t size)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	uint32_t limit = max(1u, addr + size) - 1;
+	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
+	struct drm_mm_node *mem;
+	int ret;
 
-	if (pitch) {
-		if (dev_priv->card_type >= NV_20)
-			addr |= 1;
-		else
-			addr |= 1 << 31;
+	ret = drm_mm_pre_get(&pfb->tag_heap);
+	if (ret)
+		return NULL;
+
+	spin_lock(&dev_priv->tile.lock);
+	mem = drm_mm_search_free(&pfb->tag_heap, size, 0, 0);
+	if (mem)
+		mem = drm_mm_get_block_atomic(mem, size, 0);
+	spin_unlock(&dev_priv->tile.lock);
+
+	return mem;
+}
+
+static void
+nv20_fb_free_tag(struct drm_device *dev, struct drm_mm_node *mem)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	spin_lock(&dev_priv->tile.lock);
+	drm_mm_put_block(mem);
+	spin_unlock(&dev_priv->tile.lock);
+}
+
+void
+nv10_fb_init_tile_region(struct drm_device *dev, int i, uint32_t addr,
+			 uint32_t size, uint32_t pitch, uint32_t flags)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
+	int bpp = (flags & NOUVEAU_GEM_TILE_32BPP ? 32 : 16);
+
+	tile->addr = addr;
+	tile->limit = max(1u, addr + size) - 1;
+	tile->pitch = pitch;
+
+	if (dev_priv->card_type == NV_20) {
+		if (flags & NOUVEAU_GEM_TILE_ZETA) {
+			/*
+			 * Allocate some of the on-die tag memory,
+			 * used to store Z compression meta-data (most
+			 * likely just a bitmap determining if a given
+			 * tile is compressed or not).
+			 */
+			tile->tag_mem = nv20_fb_alloc_tag(dev, size / 256);
+
+			if (tile->tag_mem) {
+				/* Enable Z compression */
+				if (dev_priv->chipset >= 0x25)
+					tile->zcomp = tile->tag_mem->start |
+						(bpp == 16 ?
+						 NV25_PFB_ZCOMP_MODE_16 :
+						 NV25_PFB_ZCOMP_MODE_32);
+				else
+					tile->zcomp = tile->tag_mem->start |
+						NV20_PFB_ZCOMP_EN |
+						(bpp == 16 ? 0 :
+						 NV20_PFB_ZCOMP_MODE_32);
+			}
+
+			tile->addr |= 3;
+		} else {
+			tile->addr |= 1;
+		}
+
+	} else {
+		tile->addr |= 1 << 31;
+	}
+}
+
+void
+nv10_fb_free_tile_region(struct drm_device *dev, int i)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
+
+	if (tile->tag_mem) {
+		nv20_fb_free_tag(dev, tile->tag_mem);
+		tile->tag_mem = NULL;
 	}
 
-	nv_wr32(dev, NV10_PFB_TLIMIT(i), limit);
-	nv_wr32(dev, NV10_PFB_TSIZE(i), pitch);
-	nv_wr32(dev, NV10_PFB_TILE(i), addr);
+	tile->addr = tile->limit = tile->pitch = tile->zcomp = 0;
+}
+
+void
+nv10_fb_set_tile_region(struct drm_device *dev, int i)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
+
+	nv_wr32(dev, NV10_PFB_TLIMIT(i), tile->limit);
+	nv_wr32(dev, NV10_PFB_TSIZE(i), tile->pitch);
+	nv_wr32(dev, NV10_PFB_TILE(i), tile->addr);
+
+	if (dev_priv->card_type == NV_20)
+		nv_wr32(dev, NV20_PFB_ZCOMP(i), tile->zcomp);
 }
 
 int
@@ -31,9 +117,14 @@
 
 	pfb->num_tiles = NV10_PFB_TILE__SIZE;
 
+	if (dev_priv->card_type == NV_20)
+		drm_mm_init(&pfb->tag_heap, 0,
+			    (dev_priv->chipset >= 0x25 ?
+			     64 * 1024 : 32 * 1024));
+
 	/* Turn all the tiling regions off. */
 	for (i = 0; i < pfb->num_tiles; i++)
-		pfb->set_region_tiling(dev, i, 0, 0, 0);
+		pfb->set_tile_region(dev, i);
 
 	return 0;
 }
@@ -41,4 +132,13 @@
 void
 nv10_fb_takedown(struct drm_device *dev)
 {
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
+	int i;
+
+	for (i = 0; i < pfb->num_tiles; i++)
+		pfb->free_tile_region(dev, i);
+
+	if (dev_priv->card_type == NV_20)
+		drm_mm_takedown(&pfb->tag_heap);
 }

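nv10_fb_init_tile_region() above reserves size / 256 bytes of on-die tag memory per zeta region and folds the tag offset into the ZCOMP control word, whose layout changed at NV25. The selection logic, factored out as a sketch (the NV20_/NV25_PFB_ZCOMP_* constants are the same ones written above):

	static u32
	zcomp_word_sketch(int chipset, int bpp, u32 tag_start)
	{
		if (chipset >= 0x25)
			return tag_start | (bpp == 16 ? NV25_PFB_ZCOMP_MODE_16 :
							NV25_PFB_ZCOMP_MODE_32);

		return tag_start | NV20_PFB_ZCOMP_EN |
		       (bpp == 16 ? 0 : NV20_PFB_ZCOMP_MODE_32);
	}
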
diff --git a/drivers/gpu/drm/nouveau/nv10_fifo.c b/drivers/gpu/drm/nouveau/nv10_fifo.c
index f1b03ad..d2ecbff 100644
--- a/drivers/gpu/drm/nouveau/nv10_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv10_fifo.c
@@ -53,6 +53,11 @@
 	if (ret)
 		return ret;
 
+	chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
+			     NV03_USER(chan->id), PAGE_SIZE);
+	if (!chan->user)
+		return -ENOMEM;
+
 	/* Fill entries that are seen filled in dumps of the nvidia driver just
 	 * after the channel is put into DMA mode
 	 */
@@ -73,17 +78,6 @@
 	return 0;
 }
 
-void
-nv10_fifo_destroy_context(struct nouveau_channel *chan)
-{
-	struct drm_device *dev = chan->dev;
-
-	nv_wr32(dev, NV04_PFIFO_MODE,
-			nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id));
-
-	nouveau_gpuobj_ref(NULL, &chan->ramfc);
-}
-
 static void
 nv10_fifo_do_load_context(struct drm_device *dev, int chid)
 {
@@ -219,6 +213,7 @@
 static void
 nv10_fifo_init_intr(struct drm_device *dev)
 {
+	nouveau_irq_register(dev, 8, nv04_fifo_isr);
 	nv_wr32(dev, 0x002100, 0xffffffff);
 	nv_wr32(dev, 0x002140, 0xffffffff);
 }
@@ -241,7 +236,7 @@
 	pfifo->reassign(dev, true);
 
 	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
-		if (dev_priv->fifos[i]) {
+		if (dev_priv->channels.ptr[i]) {
 			uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE);
 			nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i));
 		}
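
With nv10_fifo_destroy_context() gone, teardown of the chan->user mapping created above has to happen in the common channel-destruction path; a matching cleanup would plausibly look like the following (the placement is an assumption, though the iounmap itself is required for every successful ioremap):

	if (chan->user) {
		iounmap(chan->user);	/* hypothetical common teardown */
		chan->user = NULL;
	}
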
diff --git a/drivers/gpu/drm/nouveau/nv10_graph.c b/drivers/gpu/drm/nouveau/nv10_graph.c
index 8e68c97..8c92edb 100644
--- a/drivers/gpu/drm/nouveau/nv10_graph.c
+++ b/drivers/gpu/drm/nouveau/nv10_graph.c
@@ -26,6 +26,10 @@
 #include "drm.h"
 #include "nouveau_drm.h"
 #include "nouveau_drv.h"
+#include "nouveau_util.h"
+
+static int  nv10_graph_register(struct drm_device *);
+static void nv10_graph_isr(struct drm_device *);
 
 #define NV10_FIFO_NUMBER 32
 
@@ -786,15 +790,13 @@
 	return 0;
 }
 
-void
+static void
 nv10_graph_context_switch(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
 	struct nouveau_channel *chan = NULL;
 	int chid;
 
-	pgraph->fifo_access(dev, false);
 	nouveau_wait_for_idle(dev);
 
 	/* If previous context is valid, we need to save it */
@@ -802,11 +804,9 @@
 
 	/* Load context for next channel */
 	chid = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f;
-	chan = dev_priv->fifos[chid];
+	chan = dev_priv->channels.ptr[chid];
 	if (chan && chan->pgraph_ctx)
 		nv10_graph_load_context(chan);
-
-	pgraph->fifo_access(dev, true);
 }
 
 #define NV_WRITE_CTX(reg, val) do { \
@@ -833,7 +833,7 @@
 	if (chid >= dev_priv->engine.fifo.channels)
 		return NULL;
 
-	return dev_priv->fifos[chid];
+	return dev_priv->channels.ptr[chid];
 }
 
 int nv10_graph_create_context(struct nouveau_channel *chan)
@@ -875,37 +875,54 @@
 
 void nv10_graph_destroy_context(struct nouveau_channel *chan)
 {
+	struct drm_device *dev = chan->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
 	struct graph_state *pgraph_ctx = chan->pgraph_ctx;
+	unsigned long flags;
 
+	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+	pgraph->fifo_access(dev, false);
+
+	/* Unload the context if it's the currently active one */
+	if (pgraph->channel(dev) == chan)
+		pgraph->unload_context(dev);
+
+	/* Free the context resources */
 	kfree(pgraph_ctx);
 	chan->pgraph_ctx = NULL;
+
+	pgraph->fifo_access(dev, true);
+	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
 }
 
 void
-nv10_graph_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
-			     uint32_t size, uint32_t pitch)
+nv10_graph_set_tile_region(struct drm_device *dev, int i)
 {
-	uint32_t limit = max(1u, addr + size) - 1;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
 
-	if (pitch)
-		addr |= 1 << 31;
-
-	nv_wr32(dev, NV10_PGRAPH_TLIMIT(i), limit);
-	nv_wr32(dev, NV10_PGRAPH_TSIZE(i), pitch);
-	nv_wr32(dev, NV10_PGRAPH_TILE(i), addr);
+	nv_wr32(dev, NV10_PGRAPH_TLIMIT(i), tile->limit);
+	nv_wr32(dev, NV10_PGRAPH_TSIZE(i), tile->pitch);
+	nv_wr32(dev, NV10_PGRAPH_TILE(i), tile->addr);
 }
 
 int nv10_graph_init(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	uint32_t tmp;
-	int i;
+	int ret, i;
 
 	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
 			~NV_PMC_ENABLE_PGRAPH);
 	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
 			 NV_PMC_ENABLE_PGRAPH);
 
+	ret = nv10_graph_register(dev);
+	if (ret)
+		return ret;
+
+	nouveau_irq_register(dev, 12, nv10_graph_isr);
 	nv_wr32(dev, NV03_PGRAPH_INTR   , 0xFFFFFFFF);
 	nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
 
@@ -928,7 +945,7 @@
 
 	/* Turn all the tiling regions off. */
 	for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
-		nv10_graph_set_region_tiling(dev, i, 0, 0, 0);
+		nv10_graph_set_tile_region(dev, i);
 
 	nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(0), 0x00000000);
 	nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(1), 0x00000000);
@@ -948,17 +965,17 @@
 
 void nv10_graph_takedown(struct drm_device *dev)
 {
+	nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000);
+	nouveau_irq_unregister(dev, 12);
 }
 
 static int
-nv17_graph_mthd_lma_window(struct nouveau_channel *chan, int grclass,
-			   int mthd, uint32_t data)
+nv17_graph_mthd_lma_window(struct nouveau_channel *chan,
+			   u32 class, u32 mthd, u32 data)
 {
 	struct drm_device *dev = chan->dev;
 	struct graph_state *ctx = chan->pgraph_ctx;
 	struct pipe_state *pipe = &ctx->pipe_state;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
 	uint32_t pipe_0x0040[1], pipe_0x64c0[8], pipe_0x6a80[3], pipe_0x6ab0[3];
 	uint32_t xfmode0, xfmode1;
 	int i;
@@ -1025,18 +1042,14 @@
 
 	nouveau_wait_for_idle(dev);
 
-	pgraph->fifo_access(dev, true);
-
 	return 0;
 }
 
 static int
-nv17_graph_mthd_lma_enable(struct nouveau_channel *chan, int grclass,
-			   int mthd, uint32_t data)
+nv17_graph_mthd_lma_enable(struct nouveau_channel *chan,
+			   u32 class, u32 mthd, u32 data)
 {
 	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
 
 	nouveau_wait_for_idle(dev);
 
@@ -1045,40 +1058,118 @@
 	nv_wr32(dev, 0x004006b0,
 		nv_rd32(dev, 0x004006b0) | 0x8 << 24);
 
-	pgraph->fifo_access(dev, true);
-
 	return 0;
 }
 
-static struct nouveau_pgraph_object_method nv17_graph_celsius_mthds[] = {
-	{ 0x1638, nv17_graph_mthd_lma_window },
-	{ 0x163c, nv17_graph_mthd_lma_window },
-	{ 0x1640, nv17_graph_mthd_lma_window },
-	{ 0x1644, nv17_graph_mthd_lma_window },
-	{ 0x1658, nv17_graph_mthd_lma_enable },
+static int
+nv10_graph_register(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->engine.graph.registered)
+		return 0;
+
+	NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
+	NVOBJ_CLASS(dev, 0x0030, GR); /* null */
+	NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
+	NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
+	NVOBJ_CLASS(dev, 0x005f, GR); /* imageblit */
+	NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
+	NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
+	NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
+	NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
+	NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
+	NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
+	NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
+	NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
+	NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
+	NVOBJ_CLASS(dev, 0x0052, GR); /* swzsurf */
+	NVOBJ_CLASS(dev, 0x0093, GR); /* surf3d */
+	NVOBJ_CLASS(dev, 0x0094, GR); /* tex_tri */
+	NVOBJ_CLASS(dev, 0x0095, GR); /* multitex_tri */
+
+	/* celsius */
+	if (dev_priv->chipset <= 0x10) {
+		NVOBJ_CLASS(dev, 0x0056, GR);
+	} else
+	if (dev_priv->chipset < 0x17 || dev_priv->chipset == 0x1a) {
+		NVOBJ_CLASS(dev, 0x0096, GR);
+	} else {
+		NVOBJ_CLASS(dev, 0x0099, GR);
+		NVOBJ_MTHD (dev, 0x0099, 0x1638, nv17_graph_mthd_lma_window);
+		NVOBJ_MTHD (dev, 0x0099, 0x163c, nv17_graph_mthd_lma_window);
+		NVOBJ_MTHD (dev, 0x0099, 0x1640, nv17_graph_mthd_lma_window);
+		NVOBJ_MTHD (dev, 0x0099, 0x1644, nv17_graph_mthd_lma_window);
+		NVOBJ_MTHD (dev, 0x0099, 0x1658, nv17_graph_mthd_lma_enable);
+	}
+
+	/* nvsw */
+	NVOBJ_CLASS(dev, 0x506e, SW);
+	NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
+
+	dev_priv->engine.graph.registered = true;
+	return 0;
+}
+
+struct nouveau_bitfield nv10_graph_intr[] = {
+	{ NV_PGRAPH_INTR_NOTIFY, "NOTIFY" },
+	{ NV_PGRAPH_INTR_ERROR,  "ERROR"  },
 	{}
 };
 
-struct nouveau_pgraph_object_class nv10_graph_grclass[] = {
-	{ 0x0030, false, NULL }, /* null */
-	{ 0x0039, false, NULL }, /* m2mf */
-	{ 0x004a, false, NULL }, /* gdirect */
-	{ 0x005f, false, NULL }, /* imageblit */
-	{ 0x009f, false, NULL }, /* imageblit (nv12) */
-	{ 0x008a, false, NULL }, /* ifc */
-	{ 0x0089, false, NULL }, /* sifm */
-	{ 0x0062, false, NULL }, /* surf2d */
-	{ 0x0043, false, NULL }, /* rop */
-	{ 0x0012, false, NULL }, /* beta1 */
-	{ 0x0072, false, NULL }, /* beta4 */
-	{ 0x0019, false, NULL }, /* cliprect */
-	{ 0x0044, false, NULL }, /* pattern */
-	{ 0x0052, false, NULL }, /* swzsurf */
-	{ 0x0093, false, NULL }, /* surf3d */
-	{ 0x0094, false, NULL }, /* tex_tri */
-	{ 0x0095, false, NULL }, /* multitex_tri */
-	{ 0x0056, false, NULL }, /* celcius (nv10) */
-	{ 0x0096, false, NULL }, /* celcius (nv11) */
-	{ 0x0099, false, nv17_graph_celsius_mthds }, /* celcius (nv17) */
+struct nouveau_bitfield nv10_graph_nstatus[] =
+{
+	{ NV10_PGRAPH_NSTATUS_STATE_IN_USE,       "STATE_IN_USE" },
+	{ NV10_PGRAPH_NSTATUS_INVALID_STATE,      "INVALID_STATE" },
+	{ NV10_PGRAPH_NSTATUS_BAD_ARGUMENT,       "BAD_ARGUMENT" },
+	{ NV10_PGRAPH_NSTATUS_PROTECTION_FAULT,   "PROTECTION_FAULT" },
 	{}
 };
+
+static void
+nv10_graph_isr(struct drm_device *dev)
+{
+	u32 stat;
+
+	while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) {
+		u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
+		u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
+		u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
+		u32 chid = (addr & 0x01f00000) >> 20;
+		u32 subc = (addr & 0x00070000) >> 16;
+		u32 mthd = (addr & 0x00001ffc);
+		u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
+		u32 class = nv_rd32(dev, 0x400160 + subc * 4) & 0xfff;
+		u32 show = stat;
+
+		if (stat & NV_PGRAPH_INTR_ERROR) {
+			if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
+				if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data))
+					show &= ~NV_PGRAPH_INTR_ERROR;
+			}
+		}
+
+		if (stat & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
+			nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
+			stat &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
+			show &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
+			nv10_graph_context_switch(dev);
+		}
+
+		nv_wr32(dev, NV03_PGRAPH_INTR, stat);
+		nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001);
+
+		if (show && nouveau_ratelimit()) {
+			NV_INFO(dev, "PGRAPH -");
+			nouveau_bitfield_print(nv10_graph_intr, show);
+			printk(" nsource:");
+			nouveau_bitfield_print(nv04_graph_nsource, nsource);
+			printk(" nstatus:");
+			nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
+			printk("\n");
+			NV_INFO(dev, "PGRAPH - ch %d/%d class 0x%04x "
+				     "mthd 0x%04x data 0x%08x\n",
+				chid, subc, class, mthd, data);
+		}
+	}
+}
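
The destroy_context changes in this file and in the nv20/nv40 diffs below all apply the same quiesce-then-free pattern: take the context-switch lock, disable PGRAPH fifo access, kick the channel off the hardware if it is the resident one, re-enable access, and only then release software state. Condensed into one sketch using the names from the code above:

	unsigned long flags;

	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
	pgraph->fifo_access(dev, false);

	if (pgraph->channel(dev) == chan)	/* resident on hardware? */
		pgraph->unload_context(dev);

	pgraph->fifo_access(dev, true);
	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);

	/* PGRAPH is running again; now safe to free per-channel state */
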
diff --git a/drivers/gpu/drm/nouveau/nv20_graph.c b/drivers/gpu/drm/nouveau/nv20_graph.c
index 12ab9cd..8464b76 100644
--- a/drivers/gpu/drm/nouveau/nv20_graph.c
+++ b/drivers/gpu/drm/nouveau/nv20_graph.c
@@ -32,6 +32,10 @@
 #define NV34_GRCTX_SIZE    (18140)
 #define NV35_36_GRCTX_SIZE (22396)
 
+static int nv20_graph_register(struct drm_device *);
+static int nv30_graph_register(struct drm_device *);
+static void nv20_graph_isr(struct drm_device *);
+
 static void
 nv20_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
 {
@@ -425,9 +429,21 @@
 	struct drm_device *dev = chan->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+	unsigned long flags;
 
-	nouveau_gpuobj_ref(NULL, &chan->ramin_grctx);
+	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+	pgraph->fifo_access(dev, false);
+
+	/* Unload the context if it's the currently active one */
+	if (pgraph->channel(dev) == chan)
+		pgraph->unload_context(dev);
+
+	pgraph->fifo_access(dev, true);
+	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+
+	/* Free the context resources */
 	nv_wo32(pgraph->ctx_table, chan->id * 4, 0);
+	nouveau_gpuobj_ref(NULL, &chan->ramin_grctx);
 }
 
 int
@@ -496,24 +512,27 @@
 }
 
 void
-nv20_graph_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
-			     uint32_t size, uint32_t pitch)
+nv20_graph_set_tile_region(struct drm_device *dev, int i)
 {
-	uint32_t limit = max(1u, addr + size) - 1;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
 
-	if (pitch)
-		addr |= 1;
-
-	nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), limit);
-	nv_wr32(dev, NV20_PGRAPH_TSIZE(i), pitch);
-	nv_wr32(dev, NV20_PGRAPH_TILE(i), addr);
+	nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
+	nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
+	nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
 
 	nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0030 + 4 * i);
-	nv_wr32(dev, NV10_PGRAPH_RDI_DATA, limit);
+	nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->limit);
 	nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0050 + 4 * i);
-	nv_wr32(dev, NV10_PGRAPH_RDI_DATA, pitch);
+	nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->pitch);
 	nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + 4 * i);
-	nv_wr32(dev, NV10_PGRAPH_RDI_DATA, addr);
+	nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->addr);
+
+	if (dev_priv->card_type == NV_20) {
+		nv_wr32(dev, NV20_PGRAPH_ZCOMP(i), tile->zcomp);
+		nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00ea0090 + 4 * i);
+		nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->zcomp);
+	}
 }
 
 int
@@ -560,6 +579,13 @@
 
 	nv20_graph_rdi(dev);
 
+	ret = nv20_graph_register(dev);
+	if (ret) {
+		nouveau_gpuobj_ref(NULL, &pgraph->ctx_table);
+		return ret;
+	}
+
+	nouveau_irq_register(dev, 12, nv20_graph_isr);
 	nv_wr32(dev, NV03_PGRAPH_INTR   , 0xFFFFFFFF);
 	nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
 
@@ -571,16 +597,17 @@
 	nv_wr32(dev, 0x40009C           , 0x00000040);
 
 	if (dev_priv->chipset >= 0x25) {
-		nv_wr32(dev, 0x400890, 0x00080000);
+		nv_wr32(dev, 0x400890, 0x00a8cfff);
 		nv_wr32(dev, 0x400610, 0x304B1FB6);
-		nv_wr32(dev, 0x400B80, 0x18B82880);
+		nv_wr32(dev, 0x400B80, 0x1cbd3883);
 		nv_wr32(dev, 0x400B84, 0x44000000);
 		nv_wr32(dev, 0x400098, 0x40000080);
 		nv_wr32(dev, 0x400B88, 0x000000ff);
+
 	} else {
-		nv_wr32(dev, 0x400880, 0x00080000); /* 0x0008c7df */
+		nv_wr32(dev, 0x400880, 0x0008c7df);
 		nv_wr32(dev, 0x400094, 0x00000005);
-		nv_wr32(dev, 0x400B80, 0x45CAA208); /* 0x45eae20e */
+		nv_wr32(dev, 0x400B80, 0x45eae20e);
 		nv_wr32(dev, 0x400B84, 0x24000000);
 		nv_wr32(dev, 0x400098, 0x00000040);
 		nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00E00038);
@@ -591,14 +618,8 @@
 
 	/* Turn all the tiling regions off. */
 	for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
-		nv20_graph_set_region_tiling(dev, i, 0, 0, 0);
+		nv20_graph_set_tile_region(dev, i);
 
-	for (i = 0; i < 8; i++) {
-		nv_wr32(dev, 0x400980 + i * 4, nv_rd32(dev, 0x100300 + i * 4));
-		nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0090 + i * 4);
-		nv_wr32(dev, NV10_PGRAPH_RDI_DATA,
-					nv_rd32(dev, 0x100300 + i * 4));
-	}
 	nv_wr32(dev, 0x4009a0, nv_rd32(dev, 0x100324));
 	nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA000C);
 	nv_wr32(dev, NV10_PGRAPH_RDI_DATA, nv_rd32(dev, 0x100324));
@@ -642,6 +663,9 @@
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
 
+	nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000);
+	nouveau_irq_unregister(dev, 12);
+
 	nouveau_gpuobj_ref(NULL, &pgraph->ctx_table);
 }
 
@@ -684,9 +708,16 @@
 			return ret;
 	}
 
+	ret = nv30_graph_register(dev);
+	if (ret) {
+		nouveau_gpuobj_ref(NULL, &pgraph->ctx_table);
+		return ret;
+	}
+
 	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE,
 		     pgraph->ctx_table->pinst >> 4);
 
+	nouveau_irq_register(dev, 12, nv20_graph_isr);
 	nv_wr32(dev, NV03_PGRAPH_INTR   , 0xFFFFFFFF);
 	nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
 
@@ -724,7 +755,7 @@
 
 	/* Turn all the tiling regions off. */
 	for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
-		nv20_graph_set_region_tiling(dev, i, 0, 0, 0);
+		nv20_graph_set_tile_region(dev, i);
 
 	nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
 	nv_wr32(dev, NV10_PGRAPH_STATE      , 0xFFFFFFFF);
@@ -744,46 +775,125 @@
 	return 0;
 }
 
-struct nouveau_pgraph_object_class nv20_graph_grclass[] = {
-	{ 0x0030, false, NULL }, /* null */
-	{ 0x0039, false, NULL }, /* m2mf */
-	{ 0x004a, false, NULL }, /* gdirect */
-	{ 0x009f, false, NULL }, /* imageblit (nv12) */
-	{ 0x008a, false, NULL }, /* ifc */
-	{ 0x0089, false, NULL }, /* sifm */
-	{ 0x0062, false, NULL }, /* surf2d */
-	{ 0x0043, false, NULL }, /* rop */
-	{ 0x0012, false, NULL }, /* beta1 */
-	{ 0x0072, false, NULL }, /* beta4 */
-	{ 0x0019, false, NULL }, /* cliprect */
-	{ 0x0044, false, NULL }, /* pattern */
-	{ 0x009e, false, NULL }, /* swzsurf */
-	{ 0x0096, false, NULL }, /* celcius */
-	{ 0x0097, false, NULL }, /* kelvin (nv20) */
-	{ 0x0597, false, NULL }, /* kelvin (nv25) */
-	{}
-};
+static int
+nv20_graph_register(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
 
-struct nouveau_pgraph_object_class nv30_graph_grclass[] = {
-	{ 0x0030, false, NULL }, /* null */
-	{ 0x0039, false, NULL }, /* m2mf */
-	{ 0x004a, false, NULL }, /* gdirect */
-	{ 0x009f, false, NULL }, /* imageblit (nv12) */
-	{ 0x008a, false, NULL }, /* ifc */
-	{ 0x038a, false, NULL }, /* ifc (nv30) */
-	{ 0x0089, false, NULL }, /* sifm */
-	{ 0x0389, false, NULL }, /* sifm (nv30) */
-	{ 0x0062, false, NULL }, /* surf2d */
-	{ 0x0362, false, NULL }, /* surf2d (nv30) */
-	{ 0x0043, false, NULL }, /* rop */
-	{ 0x0012, false, NULL }, /* beta1 */
-	{ 0x0072, false, NULL }, /* beta4 */
-	{ 0x0019, false, NULL }, /* cliprect */
-	{ 0x0044, false, NULL }, /* pattern */
-	{ 0x039e, false, NULL }, /* swzsurf */
-	{ 0x0397, false, NULL }, /* rankine (nv30) */
-	{ 0x0497, false, NULL }, /* rankine (nv35) */
-	{ 0x0697, false, NULL }, /* rankine (nv34) */
-	{}
-};
+	if (dev_priv->engine.graph.registered)
+		return 0;
 
+	NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
+	NVOBJ_CLASS(dev, 0x0030, GR); /* null */
+	NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
+	NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
+	NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
+	NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
+	NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
+	NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
+	NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
+	NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
+	NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
+	NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
+	NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
+	NVOBJ_CLASS(dev, 0x009e, GR); /* swzsurf */
+	NVOBJ_CLASS(dev, 0x0096, GR); /* celsius */
+
+	/* kelvin */
+	if (dev_priv->chipset < 0x25)
+		NVOBJ_CLASS(dev, 0x0097, GR);
+	else
+		NVOBJ_CLASS(dev, 0x0597, GR);
+
+	/* nvsw */
+	NVOBJ_CLASS(dev, 0x506e, SW);
+	NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
+
+	dev_priv->engine.graph.registered = true;
+	return 0;
+}
+
+static int
+nv30_graph_register(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->engine.graph.registered)
+		return 0;
+
+	NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
+	NVOBJ_CLASS(dev, 0x0030, GR); /* null */
+	NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
+	NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
+	NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
+	NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
+	NVOBJ_CLASS(dev, 0x038a, GR); /* ifc (nv30) */
+	NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
+	NVOBJ_CLASS(dev, 0x0389, GR); /* sifm (nv30) */
+	NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
+	NVOBJ_CLASS(dev, 0x0362, GR); /* surf2d (nv30) */
+	NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
+	NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
+	NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
+	NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
+	NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
+	NVOBJ_CLASS(dev, 0x039e, GR); /* swzsurf */
+
+	/* rankine */
+	if (0x00000003 & (1 << (dev_priv->chipset & 0x0f)))
+		NVOBJ_CLASS(dev, 0x0397, GR);
+	else
+	if (0x00000010 & (1 << (dev_priv->chipset & 0x0f)))
+		NVOBJ_CLASS(dev, 0x0697, GR);
+	else
+	if (0x000001e0 & (1 << (dev_priv->chipset & 0x0f)))
+		NVOBJ_CLASS(dev, 0x0497, GR);
+
+	/* nvsw */
+	NVOBJ_CLASS(dev, 0x506e, SW);
+	NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
+
+	dev_priv->engine.graph.registered = true;
+	return 0;
+}
+
+static void
+nv20_graph_isr(struct drm_device *dev)
+{
+	u32 stat;
+
+	while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) {
+		u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
+		u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
+		u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
+		u32 chid = (addr & 0x01f00000) >> 20;
+		u32 subc = (addr & 0x00070000) >> 16;
+		u32 mthd = (addr & 0x00001ffc);
+		u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
+		u32 class = nv_rd32(dev, 0x400160 + subc * 4) & 0xfff;
+		u32 show = stat;
+
+		if (stat & NV_PGRAPH_INTR_ERROR) {
+			if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
+				if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data))
+					show &= ~NV_PGRAPH_INTR_ERROR;
+			}
+		}
+
+		nv_wr32(dev, NV03_PGRAPH_INTR, stat);
+		nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001);
+
+		if (show && nouveau_ratelimit()) {
+			NV_INFO(dev, "PGRAPH -");
+			nouveau_bitfield_print(nv10_graph_intr, show);
+			printk(" nsource:");
+			nouveau_bitfield_print(nv04_graph_nsource, nsource);
+			printk(" nstatus:");
+			nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
+			printk("\n");
+			NV_INFO(dev, "PGRAPH - ch %d/%d class 0x%04x "
+				     "mthd 0x%04x data 0x%08x\n",
+				chid, subc, class, mthd, data);
+		}
+	}
+}
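
The rankine selection in nv30_graph_register() above encodes the supported chipsets as bitmask constants: bit n of the mask is set when chipset 0x3n uses that object class. Unpacked into an equivalent sketch:

	static u32
	rankine_class_sketch(int chipset)
	{
		u32 bit = 1 << (chipset & 0x0f);

		if (bit & 0x00000003)	/* 0x30, 0x31 */
			return 0x0397;
		if (bit & 0x00000010)	/* 0x34 */
			return 0x0697;
		if (bit & 0x000001e0)	/* 0x35 - 0x38 */
			return 0x0497;
		return 0;		/* no 3D class for this chipset */
	}
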
diff --git a/drivers/gpu/drm/nouveau/nv30_fb.c b/drivers/gpu/drm/nouveau/nv30_fb.c
index 4a3f2f0..e0135f0 100644
--- a/drivers/gpu/drm/nouveau/nv30_fb.c
+++ b/drivers/gpu/drm/nouveau/nv30_fb.c
@@ -29,6 +29,27 @@
 #include "nouveau_drv.h"
 #include "nouveau_drm.h"
 
+void
+nv30_fb_init_tile_region(struct drm_device *dev, int i, uint32_t addr,
+			 uint32_t size, uint32_t pitch, uint32_t flags)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
+
+	tile->addr = addr | 1;
+	tile->limit = max(1u, addr + size) - 1;
+	tile->pitch = pitch;
+}
+
+void
+nv30_fb_free_tile_region(struct drm_device *dev, int i)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
+
+	tile->addr = tile->limit = tile->pitch = 0;
+}
+
 static int
 calc_bias(struct drm_device *dev, int k, int i, int j)
 {
@@ -65,7 +86,7 @@
 
 	/* Turn all the tiling regions off. */
 	for (i = 0; i < pfb->num_tiles; i++)
-		pfb->set_region_tiling(dev, i, 0, 0, 0);
+		pfb->set_tile_region(dev, i);
 
 	/* Init the memory timing regs at 0x10037c/0x1003ac */
 	if (dev_priv->chipset == 0x30 ||
diff --git a/drivers/gpu/drm/nouveau/nv40_fb.c b/drivers/gpu/drm/nouveau/nv40_fb.c
index 3cd07d8..f3d9c05 100644
--- a/drivers/gpu/drm/nouveau/nv40_fb.c
+++ b/drivers/gpu/drm/nouveau/nv40_fb.c
@@ -4,26 +4,22 @@
 #include "nouveau_drm.h"
 
 void
-nv40_fb_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
-			  uint32_t size, uint32_t pitch)
+nv40_fb_set_tile_region(struct drm_device *dev, int i)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	uint32_t limit = max(1u, addr + size) - 1;
-
-	if (pitch)
-		addr |= 1;
+	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
 
 	switch (dev_priv->chipset) {
 	case 0x40:
-		nv_wr32(dev, NV10_PFB_TLIMIT(i), limit);
-		nv_wr32(dev, NV10_PFB_TSIZE(i), pitch);
-		nv_wr32(dev, NV10_PFB_TILE(i), addr);
+		nv_wr32(dev, NV10_PFB_TLIMIT(i), tile->limit);
+		nv_wr32(dev, NV10_PFB_TSIZE(i), tile->pitch);
+		nv_wr32(dev, NV10_PFB_TILE(i), tile->addr);
 		break;
 
 	default:
-		nv_wr32(dev, NV40_PFB_TLIMIT(i), limit);
-		nv_wr32(dev, NV40_PFB_TSIZE(i), pitch);
-		nv_wr32(dev, NV40_PFB_TILE(i), addr);
+		nv_wr32(dev, NV40_PFB_TLIMIT(i), tile->limit);
+		nv_wr32(dev, NV40_PFB_TSIZE(i), tile->pitch);
+		nv_wr32(dev, NV40_PFB_TILE(i), tile->addr);
 		break;
 	}
 }
@@ -64,7 +60,7 @@
 
 	/* Turn all the tiling regions off. */
 	for (i = 0; i < pfb->num_tiles; i++)
-		pfb->set_region_tiling(dev, i, 0, 0, 0);
+		pfb->set_tile_region(dev, i);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nv40_fifo.c b/drivers/gpu/drm/nouveau/nv40_fifo.c
index d337b8b..c86e4d4 100644
--- a/drivers/gpu/drm/nouveau/nv40_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv40_fifo.c
@@ -47,6 +47,11 @@
 	if (ret)
 		return ret;
 
+	chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
+			     NV40_USER(chan->id), PAGE_SIZE);
+	if (!chan->user)
+		return -ENOMEM;
+
 	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
 
 	nv_wi32(dev, fc +  0, chan->pushbuf_base);
@@ -70,17 +75,6 @@
 	return 0;
 }
 
-void
-nv40_fifo_destroy_context(struct nouveau_channel *chan)
-{
-	struct drm_device *dev = chan->dev;
-
-	nv_wr32(dev, NV04_PFIFO_MODE,
-		nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id));
-
-	nouveau_gpuobj_ref(NULL, &chan->ramfc);
-}
-
 static void
 nv40_fifo_do_load_context(struct drm_device *dev, int chid)
 {
@@ -279,6 +273,7 @@
 static void
 nv40_fifo_init_intr(struct drm_device *dev)
 {
+	nouveau_irq_register(dev, 8, nv04_fifo_isr);
 	nv_wr32(dev, 0x002100, 0xffffffff);
 	nv_wr32(dev, 0x002140, 0xffffffff);
 }
@@ -301,7 +296,7 @@
 	pfifo->reassign(dev, true);
 
 	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
-		if (dev_priv->fifos[i]) {
+		if (dev_priv->channels.ptr[i]) {
 			uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE);
 			nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i));
 		}
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c
index 7ee1b91..0618846 100644
--- a/drivers/gpu/drm/nouveau/nv40_graph.c
+++ b/drivers/gpu/drm/nouveau/nv40_graph.c
@@ -29,6 +29,9 @@
 #include "nouveau_drv.h"
 #include "nouveau_grctx.h"
 
+static int nv40_graph_register(struct drm_device *);
+static void nv40_graph_isr(struct drm_device *);
+
 struct nouveau_channel *
 nv40_graph_channel(struct drm_device *dev)
 {
@@ -42,7 +45,7 @@
 	inst = (inst & NV40_PGRAPH_CTXCTL_CUR_INSTANCE) << 4;
 
 	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
-		struct nouveau_channel *chan = dev_priv->fifos[i];
+		struct nouveau_channel *chan = dev_priv->channels.ptr[i];
 
 		if (chan && chan->ramin_grctx &&
 		    chan->ramin_grctx->pinst == inst)
@@ -79,6 +82,22 @@
 void
 nv40_graph_destroy_context(struct nouveau_channel *chan)
 {
+	struct drm_device *dev = chan->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+	pgraph->fifo_access(dev, false);
+
+	/* Unload the context if it's the currently active one */
+	if (pgraph->channel(dev) == chan)
+		pgraph->unload_context(dev);
+
+	pgraph->fifo_access(dev, true);
+	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+
+	/* Free the context resources */
 	nouveau_gpuobj_ref(NULL, &chan->ramin_grctx);
 }
 
@@ -174,43 +193,39 @@
 }
 
 void
-nv40_graph_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
-			     uint32_t size, uint32_t pitch)
+nv40_graph_set_tile_region(struct drm_device *dev, int i)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	uint32_t limit = max(1u, addr + size) - 1;
-
-	if (pitch)
-		addr |= 1;
+	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
 
 	switch (dev_priv->chipset) {
 	case 0x44:
 	case 0x4a:
 	case 0x4e:
-		nv_wr32(dev, NV20_PGRAPH_TSIZE(i), pitch);
-		nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), limit);
-		nv_wr32(dev, NV20_PGRAPH_TILE(i), addr);
+		nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
+		nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
+		nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
 		break;
 
 	case 0x46:
 	case 0x47:
 	case 0x49:
 	case 0x4b:
-		nv_wr32(dev, NV47_PGRAPH_TSIZE(i), pitch);
-		nv_wr32(dev, NV47_PGRAPH_TLIMIT(i), limit);
-		nv_wr32(dev, NV47_PGRAPH_TILE(i), addr);
-		nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), pitch);
-		nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), limit);
-		nv_wr32(dev, NV40_PGRAPH_TILE1(i), addr);
+		nv_wr32(dev, NV47_PGRAPH_TSIZE(i), tile->pitch);
+		nv_wr32(dev, NV47_PGRAPH_TLIMIT(i), tile->limit);
+		nv_wr32(dev, NV47_PGRAPH_TILE(i), tile->addr);
+		nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch);
+		nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit);
+		nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr);
 		break;
 
 	default:
-		nv_wr32(dev, NV20_PGRAPH_TSIZE(i), pitch);
-		nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), limit);
-		nv_wr32(dev, NV20_PGRAPH_TILE(i), addr);
-		nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), pitch);
-		nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), limit);
-		nv_wr32(dev, NV40_PGRAPH_TILE1(i), addr);
+		nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
+		nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
+		nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
+		nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch);
+		nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit);
+		nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr);
 		break;
 	}
 }
@@ -232,7 +247,7 @@
 	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
 	struct nouveau_grctx ctx = {};
 	uint32_t vramsz, *cp;
-	int i, j;
+	int ret, i, j;
 
 	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
 			~NV_PMC_ENABLE_PGRAPH);
@@ -256,9 +271,14 @@
 
 	kfree(cp);
 
+	ret = nv40_graph_register(dev);
+	if (ret)
+		return ret;
+
 	/* No context present currently */
 	nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0x00000000);
 
+	nouveau_irq_register(dev, 12, nv40_graph_isr);
 	nv_wr32(dev, NV03_PGRAPH_INTR   , 0xFFFFFFFF);
 	nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xFFFFFFFF);
 
@@ -347,7 +367,7 @@
 
 	/* Turn all the tiling regions off. */
 	for (i = 0; i < pfb->num_tiles; i++)
-		nv40_graph_set_region_tiling(dev, i, 0, 0, 0);
+		nv40_graph_set_tile_region(dev, i);
 
 	/* begin RAM config */
 	vramsz = pci_resource_len(dev->pdev, 0) - 1;
@@ -390,26 +410,111 @@
 
 void nv40_graph_takedown(struct drm_device *dev)
 {
+	nouveau_irq_unregister(dev, 12);
 }
 
-struct nouveau_pgraph_object_class nv40_graph_grclass[] = {
-	{ 0x0030, false, NULL }, /* null */
-	{ 0x0039, false, NULL }, /* m2mf */
-	{ 0x004a, false, NULL }, /* gdirect */
-	{ 0x009f, false, NULL }, /* imageblit (nv12) */
-	{ 0x008a, false, NULL }, /* ifc */
-	{ 0x0089, false, NULL }, /* sifm */
-	{ 0x3089, false, NULL }, /* sifm (nv40) */
-	{ 0x0062, false, NULL }, /* surf2d */
-	{ 0x3062, false, NULL }, /* surf2d (nv40) */
-	{ 0x0043, false, NULL }, /* rop */
-	{ 0x0012, false, NULL }, /* beta1 */
-	{ 0x0072, false, NULL }, /* beta4 */
-	{ 0x0019, false, NULL }, /* cliprect */
-	{ 0x0044, false, NULL }, /* pattern */
-	{ 0x309e, false, NULL }, /* swzsurf */
-	{ 0x4097, false, NULL }, /* curie (nv40) */
-	{ 0x4497, false, NULL }, /* curie (nv44) */
-	{}
-};
+static int
+nv40_graph_register(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
 
+	if (dev_priv->engine.graph.registered)
+		return 0;
+
+	NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
+	NVOBJ_CLASS(dev, 0x0030, GR); /* null */
+	NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
+	NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
+	NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
+	NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
+	NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
+	NVOBJ_CLASS(dev, 0x3089, GR); /* sifm (nv40) */
+	NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
+	NVOBJ_CLASS(dev, 0x3062, GR); /* surf2d (nv40) */
+	NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
+	NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
+	NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
+	NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
+	NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
+	NVOBJ_CLASS(dev, 0x309e, GR); /* swzsurf */
+
+	/* curie */
+	if (dev_priv->chipset >= 0x60 ||
+	    0x00005450 & (1 << (dev_priv->chipset & 0x0f)))
+		NVOBJ_CLASS(dev, 0x4497, GR);
+	else
+		NVOBJ_CLASS(dev, 0x4097, GR);
+
+	/* nvsw (class registered above) */
+	NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
+
+	dev_priv->engine.graph.registered = true;
+	return 0;
+}
+
+static int
+nv40_graph_isr_chid(struct drm_device *dev, u32 inst)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_channel *chan;
+	unsigned long flags;
+	int i;
+
+	spin_lock_irqsave(&dev_priv->channels.lock, flags);
+	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
+		chan = dev_priv->channels.ptr[i];
+		if (!chan || !chan->ramin_grctx)
+			continue;
+
+		if (inst == chan->ramin_grctx->pinst)
+			break;
+	}
+	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
+	return i;
+}
+
+static void
+nv40_graph_isr(struct drm_device *dev)
+{
+	u32 stat;
+
+	while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) {
+		u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
+		u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
+		u32 inst = (nv_rd32(dev, 0x40032c) & 0x000fffff) << 4;
+		u32 chid = nv40_graph_isr_chid(dev, inst);
+		u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
+		u32 subc = (addr & 0x00070000) >> 16;
+		u32 mthd = (addr & 0x00001ffc);
+		u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
+		u32 class = nv_rd32(dev, 0x400160 + subc * 4) & 0xffff;
+		u32 show = stat;
+
+		if (stat & NV_PGRAPH_INTR_ERROR) {
+			if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
+				if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data))
+					show &= ~NV_PGRAPH_INTR_ERROR;
+			} else
+			if (nsource & NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION) {
+				nv_mask(dev, 0x402000, 0, 0);
+			}
+		}
+
+		nv_wr32(dev, NV03_PGRAPH_INTR, stat);
+		nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001);
+
+		if (show && nouveau_ratelimit()) {
+			NV_INFO(dev, "PGRAPH -");
+			nouveau_bitfield_print(nv10_graph_intr, show);
+			printk(" nsource:");
+			nouveau_bitfield_print(nv04_graph_nsource, nsource);
+			printk(" nstatus:");
+			nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
+			printk("\n");
+			NV_INFO(dev, "PGRAPH - ch %d (0x%08x) subc %d "
+				     "class 0x%04x mthd 0x%04x data 0x%08x\n",
+				chid, inst, subc, class, mthd, data);
+		}
+	}
+}
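
nv40_graph_isr() above is one instance of the drain-and-ack pattern shared by the per-engine ISRs added in this series: loop while the status register reads non-zero, let software try to consume each condition, ack everything that was read, and rate-limit a report of whatever nobody handled. Stripped of the PGRAPH specifics, a sketch of that control flow (ENGINE_INTR, INTR_ERROR and sw_consume() are illustrative placeholders, not real nouveau symbols):

#define ENGINE_INTR	0x000100	/* placeholder register offset */
#define INTR_ERROR	0x00100000	/* placeholder status bit */

static int sw_consume(struct drm_device *dev);	/* placeholder hook; 0 == consumed */

static void
example_engine_isr(struct drm_device *dev)
{
	u32 stat;

	while ((stat = nv_rd32(dev, ENGINE_INTR))) {
		u32 show = stat;	/* bits nobody has handled yet */

		if ((stat & INTR_ERROR) && !sw_consume(dev))
			show &= ~INTR_ERROR;

		/* ack exactly what was read, handled or not */
		nv_wr32(dev, ENGINE_INTR, stat);

		if (show && nouveau_ratelimit())
			NV_INFO(dev, "unhandled intr 0x%08x\n", show);
	}
}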
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
index 56476d0..2c346f7 100644
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -345,7 +345,6 @@
 		     uint32_t buffer_handle, uint32_t width, uint32_t height)
 {
 	struct drm_device *dev = crtc->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
 	struct nouveau_bo *cursor = NULL;
 	struct drm_gem_object *gem;
@@ -374,8 +373,7 @@
 
 	nouveau_bo_unmap(cursor);
 
-	nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.nvbo->bo.offset -
-					    dev_priv->vm_vram_base);
+	nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.nvbo->bo.mem.start << PAGE_SHIFT);
 	nv_crtc->cursor.show(nv_crtc, true);
 
 out:
@@ -437,6 +435,7 @@
 	.cursor_move = nv50_crtc_cursor_move,
 	.gamma_set = nv50_crtc_gamma_set,
 	.set_config = drm_crtc_helper_set_config,
+	.page_flip = nouveau_crtc_page_flip,
 	.destroy = nv50_crtc_destroy,
 };
 
@@ -453,6 +452,7 @@
 
 	NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
 
+	drm_vblank_pre_modeset(dev, nv_crtc->index);
 	nv50_crtc_blank(nv_crtc, true);
 }
 
@@ -468,6 +468,7 @@
 	NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
 
 	nv50_crtc_blank(nv_crtc, false);
+	drm_vblank_post_modeset(dev, nv_crtc->index);
 
 	ret = RING_SPACE(evo, 2);
 	if (ret) {
@@ -545,7 +546,7 @@
 		 return -EINVAL;
 	}
 
-	nv_crtc->fb.offset = fb->nvbo->bo.offset - dev_priv->vm_vram_base;
+	nv_crtc->fb.offset = fb->nvbo->bo.mem.start << PAGE_SHIFT;
 	nv_crtc->fb.tile_flags = nouveau_bo_tile_layout(fb->nvbo);
 	nv_crtc->fb.cpp = drm_fb->bits_per_pixel / 8;
 	if (!nv_crtc->fb.blanked && dev_priv->chipset != 0x50) {
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index f624c61..7cc94ed 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -33,6 +33,8 @@
 #include "nouveau_ramht.h"
 #include "drm_crtc_helper.h"
 
+static void nv50_display_isr(struct drm_device *);
+
 static inline int
 nv50_sor_nr(struct drm_device *dev)
 {
@@ -46,159 +48,6 @@
 	return 4;
 }
 
-static void
-nv50_evo_channel_del(struct nouveau_channel **pchan)
-{
-	struct nouveau_channel *chan = *pchan;
-
-	if (!chan)
-		return;
-	*pchan = NULL;
-
-	nouveau_gpuobj_channel_takedown(chan);
-	nouveau_bo_unmap(chan->pushbuf_bo);
-	nouveau_bo_ref(NULL, &chan->pushbuf_bo);
-
-	if (chan->user)
-		iounmap(chan->user);
-
-	kfree(chan);
-}
-
-static int
-nv50_evo_dmaobj_new(struct nouveau_channel *evo, uint32_t class, uint32_t name,
-		    uint32_t tile_flags, uint32_t magic_flags,
-		    uint32_t offset, uint32_t limit)
-{
-	struct drm_nouveau_private *dev_priv = evo->dev->dev_private;
-	struct drm_device *dev = evo->dev;
-	struct nouveau_gpuobj *obj = NULL;
-	int ret;
-
-	ret = nouveau_gpuobj_new(dev, evo, 6*4, 32, 0, &obj);
-	if (ret)
-		return ret;
-	obj->engine = NVOBJ_ENGINE_DISPLAY;
-
-	nv_wo32(obj,  0, (tile_flags << 22) | (magic_flags << 16) | class);
-	nv_wo32(obj,  4, limit);
-	nv_wo32(obj,  8, offset);
-	nv_wo32(obj, 12, 0x00000000);
-	nv_wo32(obj, 16, 0x00000000);
-	if (dev_priv->card_type < NV_C0)
-		nv_wo32(obj, 20, 0x00010000);
-	else
-		nv_wo32(obj, 20, 0x00020000);
-	dev_priv->engine.instmem.flush(dev);
-
-	ret = nouveau_ramht_insert(evo, name, obj);
-	nouveau_gpuobj_ref(NULL, &obj);
-	if (ret) {
-		return ret;
-	}
-
-	return 0;
-}
-
-static int
-nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pchan)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_gpuobj *ramht = NULL;
-	struct nouveau_channel *chan;
-	int ret;
-
-	chan = kzalloc(sizeof(struct nouveau_channel), GFP_KERNEL);
-	if (!chan)
-		return -ENOMEM;
-	*pchan = chan;
-
-	chan->id = -1;
-	chan->dev = dev;
-	chan->user_get = 4;
-	chan->user_put = 0;
-
-	ret = nouveau_gpuobj_new(dev, NULL, 32768, 0x1000,
-				 NVOBJ_FLAG_ZERO_ALLOC, &chan->ramin);
-	if (ret) {
-		NV_ERROR(dev, "Error allocating EVO channel memory: %d\n", ret);
-		nv50_evo_channel_del(pchan);
-		return ret;
-	}
-
-	ret = drm_mm_init(&chan->ramin_heap, 0, 32768);
-	if (ret) {
-		NV_ERROR(dev, "Error initialising EVO PRAMIN heap: %d\n", ret);
-		nv50_evo_channel_del(pchan);
-		return ret;
-	}
-
-	ret = nouveau_gpuobj_new(dev, chan, 4096, 16, 0, &ramht);
-	if (ret) {
-		NV_ERROR(dev, "Unable to allocate EVO RAMHT: %d\n", ret);
-		nv50_evo_channel_del(pchan);
-		return ret;
-	}
-
-	ret = nouveau_ramht_new(dev, ramht, &chan->ramht);
-	nouveau_gpuobj_ref(NULL, &ramht);
-	if (ret) {
-		nv50_evo_channel_del(pchan);
-		return ret;
-	}
-
-	if (dev_priv->chipset != 0x50) {
-		ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoFB16, 0x70, 0x19,
-					  0, 0xffffffff);
-		if (ret) {
-			nv50_evo_channel_del(pchan);
-			return ret;
-		}
-
-
-		ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoFB32, 0x7a, 0x19,
-					  0, 0xffffffff);
-		if (ret) {
-			nv50_evo_channel_del(pchan);
-			return ret;
-		}
-	}
-
-	ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoVRAM, 0, 0x19,
-				  0, dev_priv->vram_size);
-	if (ret) {
-		nv50_evo_channel_del(pchan);
-		return ret;
-	}
-
-	ret = nouveau_bo_new(dev, NULL, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0,
-			     false, true, &chan->pushbuf_bo);
-	if (ret == 0)
-		ret = nouveau_bo_pin(chan->pushbuf_bo, TTM_PL_FLAG_VRAM);
-	if (ret) {
-		NV_ERROR(dev, "Error creating EVO DMA push buffer: %d\n", ret);
-		nv50_evo_channel_del(pchan);
-		return ret;
-	}
-
-	ret = nouveau_bo_map(chan->pushbuf_bo);
-	if (ret) {
-		NV_ERROR(dev, "Error mapping EVO DMA push buffer: %d\n", ret);
-		nv50_evo_channel_del(pchan);
-		return ret;
-	}
-
-	chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
-					NV50_PDISPLAY_USER(0), PAGE_SIZE);
-	if (!chan->user) {
-		NV_ERROR(dev, "Error mapping EVO control regs.\n");
-		nv50_evo_channel_del(pchan);
-		return -ENOMEM;
-	}
-
-	return 0;
-}
-
 int
 nv50_display_early_init(struct drm_device *dev)
 {
@@ -214,17 +63,16 @@
 nv50_display_init(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
 	struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
-	struct nouveau_channel *evo = dev_priv->evo;
 	struct drm_connector *connector;
-	uint32_t val, ram_amount;
-	uint64_t start;
+	struct nouveau_channel *evo;
 	int ret, i;
+	u32 val;
 
 	NV_DEBUG_KMS(dev, "\n");
 
 	nv_wr32(dev, 0x00610184, nv_rd32(dev, 0x00614004));
+
 	/*
 	 * I think the 0x006101XX range is some kind of main control area
 	 * that enables things.
@@ -240,16 +88,19 @@
 		val = nv_rd32(dev, 0x0061610c + (i * 0x800));
 		nv_wr32(dev, 0x0061019c + (i * 0x10), val);
 	}
+
 	/* DAC */
 	for (i = 0; i < 3; i++) {
 		val = nv_rd32(dev, 0x0061a000 + (i * 0x800));
 		nv_wr32(dev, 0x006101d0 + (i * 0x04), val);
 	}
+
 	/* SOR */
 	for (i = 0; i < nv50_sor_nr(dev); i++) {
 		val = nv_rd32(dev, 0x0061c000 + (i * 0x800));
 		nv_wr32(dev, 0x006101e0 + (i * 0x04), val);
 	}
+
 	/* EXT */
 	for (i = 0; i < 3; i++) {
 		val = nv_rd32(dev, 0x0061e000 + (i * 0x800));
@@ -262,17 +113,6 @@
 		nv_wr32(dev, NV50_PDISPLAY_DAC_CLK_CTRL1(i), 0x00000001);
 	}
 
-	/* This used to be in crtc unblank, but seems out of place there. */
-	nv_wr32(dev, NV50_PDISPLAY_UNK_380, 0);
-	/* RAM is clamped to 256 MiB. */
-	ram_amount = dev_priv->vram_size;
-	NV_DEBUG_KMS(dev, "ram_amount %d\n", ram_amount);
-	if (ram_amount > 256*1024*1024)
-		ram_amount = 256*1024*1024;
-	nv_wr32(dev, NV50_PDISPLAY_RAM_AMOUNT, ram_amount - 1);
-	nv_wr32(dev, NV50_PDISPLAY_UNK_388, 0x150000);
-	nv_wr32(dev, NV50_PDISPLAY_UNK_38C, 0);
-
 	/* The precise purpose is unknown, I suspect it has something to do
 	 * with text mode.
 	 */
@@ -287,37 +127,6 @@
 		}
 	}
 
-	/* taken from nv bug #12637, attempts to un-wedge the hw if it's
-	 * stuck in some unspecified state
-	 */
-	start = ptimer->read(dev);
-	nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x2b00);
-	while ((val = nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0))) & 0x1e0000) {
-		if ((val & 0x9f0000) == 0x20000)
-			nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0),
-							val | 0x800000);
-
-		if ((val & 0x3f0000) == 0x30000)
-			nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0),
-							val | 0x200000);
-
-		if (ptimer->read(dev) - start > 1000000000ULL) {
-			NV_ERROR(dev, "timeout: (0x610200 & 0x1e0000) != 0\n");
-			NV_ERROR(dev, "0x610200 = 0x%08x\n", val);
-			return -EBUSY;
-		}
-	}
-
-	nv_wr32(dev, NV50_PDISPLAY_CTRL_STATE, NV50_PDISPLAY_CTRL_STATE_ENABLE);
-	nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x1000b03);
-	if (!nv_wait(dev, NV50_PDISPLAY_CHANNEL_STAT(0),
-		     0x40000000, 0x40000000)) {
-		NV_ERROR(dev, "timeout: (0x610200 & 0x40000000) == 0x40000000\n");
-		NV_ERROR(dev, "0x610200 = 0x%08x\n",
-			  nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0)));
-		return -EBUSY;
-	}
-
 	for (i = 0; i < 2; i++) {
 		nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), 0x2000);
 		if (!nv_wait(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
@@ -341,39 +150,31 @@
 		}
 	}
 
-	nv_wr32(dev, NV50_PDISPLAY_OBJECTS, (evo->ramin->vinst >> 8) | 9);
+	nv_wr32(dev, NV50_PDISPLAY_PIO_CTRL, 0x00000000);
+	nv_mask(dev, NV50_PDISPLAY_INTR_0, 0x00000000, 0x00000000);
+	nv_wr32(dev, NV50_PDISPLAY_INTR_EN_0, 0x00000000);
+	nv_mask(dev, NV50_PDISPLAY_INTR_1, 0x00000000, 0x00000000);
+	nv_wr32(dev, NV50_PDISPLAY_INTR_EN_1,
+		     NV50_PDISPLAY_INTR_EN_1_CLK_UNK10 |
+		     NV50_PDISPLAY_INTR_EN_1_CLK_UNK20 |
+		     NV50_PDISPLAY_INTR_EN_1_CLK_UNK40);
 
-	/* initialise fifo */
-	nv_wr32(dev, NV50_PDISPLAY_CHANNEL_DMA_CB(0),
-		((evo->pushbuf_bo->bo.mem.start << PAGE_SHIFT) >> 8) |
-		NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION_VRAM |
-		NV50_PDISPLAY_CHANNEL_DMA_CB_VALID);
-	nv_wr32(dev, NV50_PDISPLAY_CHANNEL_UNK2(0), 0x00010000);
-	nv_wr32(dev, NV50_PDISPLAY_CHANNEL_UNK3(0), 0x00000002);
-	if (!nv_wait(dev, 0x610200, 0x80000000, 0x00000000)) {
-		NV_ERROR(dev, "timeout: (0x610200 & 0x80000000) == 0\n");
-		NV_ERROR(dev, "0x610200 = 0x%08x\n", nv_rd32(dev, 0x610200));
-		return -EBUSY;
+	/* enable hotplug interrupts */
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		struct nouveau_connector *conn = nouveau_connector(connector);
+
+		if (conn->dcb->gpio_tag == 0xff)
+			continue;
+
+		pgpio->irq_enable(dev, conn->dcb->gpio_tag, true);
 	}
-	nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0),
-		(nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0)) & ~0x00000003) |
-		 NV50_PDISPLAY_CHANNEL_STAT_DMA_ENABLED);
-	nv_wr32(dev, NV50_PDISPLAY_USER_PUT(0), 0);
-	nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x01000003 |
-		NV50_PDISPLAY_CHANNEL_STAT_DMA_ENABLED);
-	nv_wr32(dev, 0x610300, nv_rd32(dev, 0x610300) & ~1);
 
-	evo->dma.max = (4096/4) - 2;
-	evo->dma.put = 0;
-	evo->dma.cur = evo->dma.put;
-	evo->dma.free = evo->dma.max - evo->dma.cur;
-
-	ret = RING_SPACE(evo, NOUVEAU_DMA_SKIPS);
+	ret = nv50_evo_init(dev);
 	if (ret)
 		return ret;
+	evo = dev_priv->evo;
 
-	for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
-		OUT_RING(evo, 0);
+	nv_wr32(dev, NV50_PDISPLAY_OBJECTS, (evo->ramin->vinst >> 8) | 9);
 
 	ret = RING_SPACE(evo, 11);
 	if (ret)
@@ -393,21 +194,6 @@
 	if (!nv_wait(dev, 0x640004, 0xffffffff, evo->dma.put << 2))
 		NV_ERROR(dev, "evo pushbuf stalled\n");
 
-	/* enable clock change interrupts. */
-	nv_wr32(dev, 0x610028, 0x00010001);
-	nv_wr32(dev, NV50_PDISPLAY_INTR_EN, (NV50_PDISPLAY_INTR_EN_CLK_UNK10 |
-					     NV50_PDISPLAY_INTR_EN_CLK_UNK20 |
-					     NV50_PDISPLAY_INTR_EN_CLK_UNK40));
-
-	/* enable hotplug interrupts */
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-		struct nouveau_connector *conn = nouveau_connector(connector);
-
-		if (conn->dcb->gpio_tag == 0xff)
-			continue;
-
-		pgpio->irq_enable(dev, conn->dcb->gpio_tag, true);
-	}
 
 	return 0;
 }
@@ -452,13 +238,7 @@
 		}
 	}
 
-	nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0);
-	nv_wr32(dev, NV50_PDISPLAY_CTRL_STATE, 0);
-	if (!nv_wait(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x1e0000, 0)) {
-		NV_ERROR(dev, "timeout: (0x610200 & 0x1e0000) == 0\n");
-		NV_ERROR(dev, "0x610200 = 0x%08x\n",
-			  nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0)));
-	}
+	nv50_evo_fini(dev);
 
 	for (i = 0; i < 3; i++) {
 		if (!nv_wait(dev, NV50_PDISPLAY_SOR_DPMS_STATE(i),
@@ -470,7 +250,7 @@
 	}
 
 	/* disable interrupts. */
-	nv_wr32(dev, NV50_PDISPLAY_INTR_EN, 0x00000000);
+	nv_wr32(dev, NV50_PDISPLAY_INTR_EN_1, 0x00000000);
 
 	/* disable hotplug interrupts */
 	nv_wr32(dev, 0xe054, 0xffffffff);
@@ -508,13 +288,6 @@
 
 	dev->mode_config.fb_base = dev_priv->fb_phys;
 
-	/* Create EVO channel */
-	ret = nv50_evo_channel_new(dev, &dev_priv->evo);
-	if (ret) {
-		NV_ERROR(dev, "Error creating EVO channel: %d\n", ret);
-		return ret;
-	}
-
 	/* Create CRTC objects */
 	for (i = 0; i < 2; i++)
 		nv50_crtc_create(dev, i);
@@ -557,6 +330,9 @@
 		}
 	}
 
+	INIT_WORK(&dev_priv->irq_work, nv50_display_irq_handler_bh);
+	nouveau_irq_register(dev, 26, nv50_display_isr);
+
 	ret = nv50_display_init(dev);
 	if (ret) {
 		nv50_display_destroy(dev);
@@ -569,14 +345,12 @@
 void
 nv50_display_destroy(struct drm_device *dev)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-
 	NV_DEBUG_KMS(dev, "\n");
 
 	drm_mode_config_cleanup(dev);
 
 	nv50_display_disable(dev);
-	nv50_evo_channel_del(&dev_priv->evo);
+	nouveau_irq_unregister(dev, 26);
 }
 
 static u16
@@ -660,32 +434,32 @@
 nv50_display_vblank_crtc_handler(struct drm_device *dev, int crtc)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_channel *chan;
-	struct list_head *entry, *tmp;
+	struct nouveau_channel *chan, *tmp;
 
-	list_for_each_safe(entry, tmp, &dev_priv->vbl_waiting) {
-		chan = list_entry(entry, struct nouveau_channel, nvsw.vbl_wait);
+	list_for_each_entry_safe(chan, tmp, &dev_priv->vbl_waiting,
+				 nvsw.vbl_wait) {
+		if (chan->nvsw.vblsem_head != crtc)
+			continue;
 
 		nouveau_bo_wr32(chan->notifier_bo, chan->nvsw.vblsem_offset,
 						chan->nvsw.vblsem_rval);
 		list_del(&chan->nvsw.vbl_wait);
+		drm_vblank_put(dev, crtc);
 	}
+
+	drm_handle_vblank(dev, crtc);
 }
 
 static void
 nv50_display_vblank_handler(struct drm_device *dev, uint32_t intr)
 {
-	intr &= NV50_PDISPLAY_INTR_1_VBLANK_CRTC;
-
 	if (intr & NV50_PDISPLAY_INTR_1_VBLANK_CRTC_0)
 		nv50_display_vblank_crtc_handler(dev, 0);
 
 	if (intr & NV50_PDISPLAY_INTR_1_VBLANK_CRTC_1)
 		nv50_display_vblank_crtc_handler(dev, 1);
 
-	nv_wr32(dev, NV50_PDISPLAY_INTR_EN, nv_rd32(dev,
-		     NV50_PDISPLAY_INTR_EN) & ~intr);
-	nv_wr32(dev, NV50_PDISPLAY_INTR_1, intr);
+	nv_wr32(dev, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_VBLANK_CRTC);
 }
 
 static void
@@ -1011,108 +785,31 @@
 static void
 nv50_display_error_handler(struct drm_device *dev)
 {
-	uint32_t addr, data;
+	u32 channels = (nv_rd32(dev, NV50_PDISPLAY_INTR_0) & 0x001f0000) >> 16;
+	u32 addr, data;
+	int chid;
 
-	nv_wr32(dev, NV50_PDISPLAY_INTR_0, 0x00010000);
-	addr = nv_rd32(dev, NV50_PDISPLAY_TRAPPED_ADDR);
-	data = nv_rd32(dev, NV50_PDISPLAY_TRAPPED_DATA);
-
-	NV_ERROR(dev, "EvoCh %d Mthd 0x%04x Data 0x%08x (0x%04x 0x%02x)\n",
-		 0, addr & 0xffc, data, addr >> 16, (addr >> 12) & 0xf);
-
-	nv_wr32(dev, NV50_PDISPLAY_TRAPPED_ADDR, 0x90000000);
-}
-
-void
-nv50_display_irq_hotplug_bh(struct work_struct *work)
-{
-	struct drm_nouveau_private *dev_priv =
-		container_of(work, struct drm_nouveau_private, hpd_work);
-	struct drm_device *dev = dev_priv->dev;
-	struct drm_connector *connector;
-	const uint32_t gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 };
-	uint32_t unplug_mask, plug_mask, change_mask;
-	uint32_t hpd0, hpd1;
-
-	spin_lock_irq(&dev_priv->hpd_state.lock);
-	hpd0 = dev_priv->hpd_state.hpd0_bits;
-	dev_priv->hpd_state.hpd0_bits = 0;
-	hpd1 = dev_priv->hpd_state.hpd1_bits;
-	dev_priv->hpd_state.hpd1_bits = 0;
-	spin_unlock_irq(&dev_priv->hpd_state.lock);
-
-	hpd0 &= nv_rd32(dev, 0xe050);
-	if (dev_priv->chipset >= 0x90)
-		hpd1 &= nv_rd32(dev, 0xe070);
-
-	plug_mask   = (hpd0 & 0x0000ffff) | (hpd1 << 16);
-	unplug_mask = (hpd0 >> 16) | (hpd1 & 0xffff0000);
-	change_mask = plug_mask | unplug_mask;
-
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-		struct drm_encoder_helper_funcs *helper;
-		struct nouveau_connector *nv_connector =
-			nouveau_connector(connector);
-		struct nouveau_encoder *nv_encoder;
-		struct dcb_gpio_entry *gpio;
-		uint32_t reg;
-		bool plugged;
-
-		if (!nv_connector->dcb)
+	for (chid = 0; chid < 5; chid++) {
+		if (!(channels & (1 << chid)))
 			continue;
 
-		gpio = nouveau_bios_gpio_entry(dev, nv_connector->dcb->gpio_tag);
-		if (!gpio || !(change_mask & (1 << gpio->line)))
-			continue;
+		nv_wr32(dev, NV50_PDISPLAY_INTR_0, 0x00010000 << chid);
+		addr = nv_rd32(dev, NV50_PDISPLAY_TRAPPED_ADDR(chid));
+		data = nv_rd32(dev, NV50_PDISPLAY_TRAPPED_DATA(chid));
+		NV_ERROR(dev, "EvoCh %d Mthd 0x%04x Data 0x%08x "
+			      "(0x%04x 0x%02x)\n", chid,
+			 addr & 0xffc, data, addr >> 16, (addr >> 12) & 0xf);
 
-		reg = nv_rd32(dev, gpio_reg[gpio->line >> 3]);
-		plugged = !!(reg & (4 << ((gpio->line & 7) << 2)));
-		NV_INFO(dev, "%splugged %s\n", plugged ? "" : "un",
-			drm_get_connector_name(connector)) ;
-
-		if (!connector->encoder || !connector->encoder->crtc ||
-		    !connector->encoder->crtc->enabled)
-			continue;
-		nv_encoder = nouveau_encoder(connector->encoder);
-		helper = connector->encoder->helper_private;
-
-		if (nv_encoder->dcb->type != OUTPUT_DP)
-			continue;
-
-		if (plugged)
-			helper->dpms(connector->encoder, DRM_MODE_DPMS_ON);
-		else
-			helper->dpms(connector->encoder, DRM_MODE_DPMS_OFF);
+		nv_wr32(dev, NV50_PDISPLAY_TRAPPED_ADDR(chid), 0x90000000);
 	}
-
-	drm_helper_hpd_irq_event(dev);
 }
 
-void
-nv50_display_irq_handler(struct drm_device *dev)
+static void
+nv50_display_isr(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	uint32_t delayed = 0;
 
-	if (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_HOTPLUG) {
-		uint32_t hpd0_bits, hpd1_bits = 0;
-
-		hpd0_bits = nv_rd32(dev, 0xe054);
-		nv_wr32(dev, 0xe054, hpd0_bits);
-
-		if (dev_priv->chipset >= 0x90) {
-			hpd1_bits = nv_rd32(dev, 0xe074);
-			nv_wr32(dev, 0xe074, hpd1_bits);
-		}
-
-		spin_lock(&dev_priv->hpd_state.lock);
-		dev_priv->hpd_state.hpd0_bits |= hpd0_bits;
-		dev_priv->hpd_state.hpd1_bits |= hpd1_bits;
-		spin_unlock(&dev_priv->hpd_state.lock);
-
-		queue_work(dev_priv->wq, &dev_priv->hpd_work);
-	}
-
 	while (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_DISPLAY) {
 		uint32_t intr0 = nv_rd32(dev, NV50_PDISPLAY_INTR_0);
 		uint32_t intr1 = nv_rd32(dev, NV50_PDISPLAY_INTR_1);
@@ -1123,9 +820,9 @@
 		if (!intr0 && !(intr1 & ~delayed))
 			break;
 
-		if (intr0 & 0x00010000) {
+		if (intr0 & 0x001f0000) {
 			nv50_display_error_handler(dev);
-			intr0 &= ~0x00010000;
+			intr0 &= ~0x001f0000;
 		}
 
 		if (intr1 & NV50_PDISPLAY_INTR_1_VBLANK_CRTC) {
@@ -1156,4 +853,3 @@
 		}
 	}
 }
-
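
With hotplug handling moved into the GPIO engine (see nv50_gpio.c below) and nv50_display_isr() attached via nouveau_irq_register(dev, 26, ...), the top-level PMC handler no longer needs chipset-specific knowledge. The bit numbers used throughout this series are PMC interrupt lines: 8 = PFIFO, 12 = PGRAPH, 21 = GPIO, 26 = PDISPLAY. A sketch of the dispatch shape this implies; the real dispatcher lives in nouveau_irq.c, which is not part of the hunks shown, so the table and locking here are assumptions:

static void (*example_handler[32])(struct drm_device *);

static irqreturn_t
example_pmc_isr(int irq, void *arg)
{
	struct drm_device *dev = arg;
	u32 stat = nv_rd32(dev, NV03_PMC_INTR_0);
	int i;

	if (!stat)
		return IRQ_NONE;

	for (i = 0; i < 32; i++) {
		if ((stat & (1 << i)) && example_handler[i])
			example_handler[i](dev);
	}

	return IRQ_HANDLED;
}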
diff --git a/drivers/gpu/drm/nouveau/nv50_display.h b/drivers/gpu/drm/nouveau/nv50_display.h
index c551f0b..f0e30b78 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.h
+++ b/drivers/gpu/drm/nouveau/nv50_display.h
@@ -35,9 +35,7 @@
 #include "nouveau_crtc.h"
 #include "nv50_evo.h"
 
-void nv50_display_irq_handler(struct drm_device *dev);
 void nv50_display_irq_handler_bh(struct work_struct *work);
-void nv50_display_irq_hotplug_bh(struct work_struct *work);
 int nv50_display_early_init(struct drm_device *dev);
 void nv50_display_late_takedown(struct drm_device *dev);
 int nv50_display_create(struct drm_device *dev);
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.c b/drivers/gpu/drm/nouveau/nv50_evo.c
new file mode 100644
index 0000000..887b2a9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_evo.c
@@ -0,0 +1,318 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_dma.h"
+#include "nouveau_ramht.h"
+
+static void
+nv50_evo_channel_del(struct nouveau_channel **pevo)
+{
+	struct drm_nouveau_private *dev_priv;
+	struct nouveau_channel *evo = *pevo;
+
+	if (!evo)
+		return;
+	*pevo = NULL;
+
+	dev_priv = evo->dev->dev_private;
+	dev_priv->evo_alloc &= ~(1 << evo->id);
+
+	nouveau_gpuobj_channel_takedown(evo);
+	nouveau_bo_unmap(evo->pushbuf_bo);
+	nouveau_bo_ref(NULL, &evo->pushbuf_bo);
+
+	if (evo->user)
+		iounmap(evo->user);
+
+	kfree(evo);
+}
+
+int
+nv50_evo_dmaobj_new(struct nouveau_channel *evo, u32 class, u32 name,
+		    u32 tile_flags, u32 magic_flags, u32 offset, u32 limit)
+{
+	struct drm_nouveau_private *dev_priv = evo->dev->dev_private;
+	struct drm_device *dev = evo->dev;
+	struct nouveau_gpuobj *obj = NULL;
+	int ret;
+
+	ret = nouveau_gpuobj_new(dev, dev_priv->evo, 6*4, 32, 0, &obj);
+	if (ret)
+		return ret;
+	obj->engine = NVOBJ_ENGINE_DISPLAY;
+
+	nv_wo32(obj,  0, (tile_flags << 22) | (magic_flags << 16) | class);
+	nv_wo32(obj,  4, limit);
+	nv_wo32(obj,  8, offset);
+	nv_wo32(obj, 12, 0x00000000);
+	nv_wo32(obj, 16, 0x00000000);
+	if (dev_priv->card_type < NV_C0)
+		nv_wo32(obj, 20, 0x00010000);
+	else
+		nv_wo32(obj, 20, 0x00020000);
+	dev_priv->engine.instmem.flush(dev);
+
+	ret = nouveau_ramht_insert(evo, name, obj);
+	nouveau_gpuobj_ref(NULL, &obj);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int
+nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pevo)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_channel *evo;
+	int ret;
+
+	evo = kzalloc(sizeof(struct nouveau_channel), GFP_KERNEL);
+	if (!evo)
+		return -ENOMEM;
+	*pevo = evo;
+
+	for (evo->id = 0; evo->id < 5; evo->id++) {
+		if (dev_priv->evo_alloc & (1 << evo->id))
+			continue;
+
+		dev_priv->evo_alloc |= (1 << evo->id);
+		break;
+	}
+
+	if (evo->id == 5) {
+		kfree(evo);
+		return -ENODEV;
+	}
+
+	evo->dev = dev;
+	evo->user_get = 4;
+	evo->user_put = 0;
+
+	ret = nouveau_bo_new(dev, NULL, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0,
+			     false, true, &evo->pushbuf_bo);
+	if (ret == 0)
+		ret = nouveau_bo_pin(evo->pushbuf_bo, TTM_PL_FLAG_VRAM);
+	if (ret) {
+		NV_ERROR(dev, "Error creating EVO DMA push buffer: %d\n", ret);
+		nv50_evo_channel_del(pevo);
+		return ret;
+	}
+
+	ret = nouveau_bo_map(evo->pushbuf_bo);
+	if (ret) {
+		NV_ERROR(dev, "Error mapping EVO DMA push buffer: %d\n", ret);
+		nv50_evo_channel_del(pevo);
+		return ret;
+	}
+
+	evo->user = ioremap(pci_resource_start(dev->pdev, 0) +
+			    NV50_PDISPLAY_USER(evo->id), PAGE_SIZE);
+	if (!evo->user) {
+		NV_ERROR(dev, "Error mapping EVO control regs.\n");
+		nv50_evo_channel_del(pevo);
+		return -ENOMEM;
+	}
+
+	/* bind primary evo channel's ramht to the channel */
+	if (dev_priv->evo && evo != dev_priv->evo)
+		nouveau_ramht_ref(dev_priv->evo->ramht, &evo->ramht, NULL);
+
+	return 0;
+}
+
+static int
+nv50_evo_channel_init(struct nouveau_channel *evo)
+{
+	struct drm_device *dev = evo->dev;
+	int id = evo->id, ret, i;
+	u64 pushbuf = evo->pushbuf_bo->bo.mem.start << PAGE_SHIFT;
+	u32 tmp;
+
+	tmp = nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id));
+	if ((tmp & 0x009f0000) == 0x00020000)
+		nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(id), tmp | 0x00800000);
+
+	tmp = nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id));
+	if ((tmp & 0x003f0000) == 0x00030000)
+		nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(id), tmp | 0x00600000);
+
+	/* initialise fifo */
+	nv_wr32(dev, NV50_PDISPLAY_EVO_DMA_CB(id), pushbuf >> 8 |
+		     NV50_PDISPLAY_EVO_DMA_CB_LOCATION_VRAM |
+		     NV50_PDISPLAY_EVO_DMA_CB_VALID);
+	nv_wr32(dev, NV50_PDISPLAY_EVO_UNK2(id), 0x00010000);
+	nv_wr32(dev, NV50_PDISPLAY_EVO_HASH_TAG(id), id);
+	nv_mask(dev, NV50_PDISPLAY_EVO_CTRL(id), NV50_PDISPLAY_EVO_CTRL_DMA,
+		     NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED);
+
+	nv_wr32(dev, NV50_PDISPLAY_USER_PUT(id), 0x00000000);
+	nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x01000003 |
+		     NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED);
+	if (!nv_wait(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x80000000, 0x00000000)) {
+		NV_ERROR(dev, "EvoCh %d init timeout: 0x%08x\n", id,
+			 nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id)));
+		return -EBUSY;
+	}
+
+	/* enable error reporting on the channel */
+	nv_mask(dev, 0x610028, 0x00000000, 0x00010001 << id);
+
+	evo->dma.max = (4096/4) - 2;
+	evo->dma.put = 0;
+	evo->dma.cur = evo->dma.put;
+	evo->dma.free = evo->dma.max - evo->dma.cur;
+
+	ret = RING_SPACE(evo, NOUVEAU_DMA_SKIPS);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
+		OUT_RING(evo, 0);
+
+	return 0;
+}
+
+static void
+nv50_evo_channel_fini(struct nouveau_channel *evo)
+{
+	struct drm_device *dev = evo->dev;
+	int id = evo->id;
+
+	nv_mask(dev, 0x610028, 0x00010001 << id, 0x00000000);
+	nv_mask(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x00001010, 0x00001000);
+	nv_wr32(dev, NV50_PDISPLAY_INTR_0, (1 << id));
+	nv_mask(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x00000003, 0x00000000);
+	if (!nv_wait(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x001e0000, 0x00000000)) {
+		NV_ERROR(dev, "EvoCh %d takedown timeout: 0x%08x\n", id,
+			 nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id)));
+	}
+}
+
+static int
+nv50_evo_create(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_gpuobj *ramht = NULL;
+	struct nouveau_channel *evo;
+	int ret;
+
+	/* create primary evo channel, the one we use for modesetting
+	 * purposes
+	 */
+	ret = nv50_evo_channel_new(dev, &dev_priv->evo);
+	if (ret)
+		return ret;
+	evo = dev_priv->evo;
+
+	/* set up object management on it; any other EVO channel will
+	 * also use this, since there's no per-channel support on the
+	 * hardware
+	 */
+	ret = nouveau_gpuobj_new(dev, NULL, 32768, 65536,
+				 NVOBJ_FLAG_ZERO_ALLOC, &evo->ramin);
+	if (ret) {
+		NV_ERROR(dev, "Error allocating EVO channel memory: %d\n", ret);
+		nv50_evo_channel_del(&dev_priv->evo);
+		return ret;
+	}
+
+	ret = drm_mm_init(&evo->ramin_heap, 0, 32768);
+	if (ret) {
+		NV_ERROR(dev, "Error initialising EVO PRAMIN heap: %d\n", ret);
+		nv50_evo_channel_del(&dev_priv->evo);
+		return ret;
+	}
+
+	ret = nouveau_gpuobj_new(dev, evo, 4096, 16, 0, &ramht);
+	if (ret) {
+		NV_ERROR(dev, "Unable to allocate EVO RAMHT: %d\n", ret);
+		nv50_evo_channel_del(&dev_priv->evo);
+		return ret;
+	}
+
+	ret = nouveau_ramht_new(dev, ramht, &evo->ramht);
+	nouveau_gpuobj_ref(NULL, &ramht);
+	if (ret) {
+		nv50_evo_channel_del(&dev_priv->evo);
+		return ret;
+	}
+
+	/* create some default objects for the scanout memtypes we support */
+	if (dev_priv->chipset != 0x50) {
+		ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoFB16, 0x70, 0x19,
+					  0, 0xffffffff);
+		if (ret) {
+			nv50_evo_channel_del(&dev_priv->evo);
+			return ret;
+		}
+
+		ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoFB32, 0x7a, 0x19,
+					  0, 0xffffffff);
+		if (ret) {
+			nv50_evo_channel_del(&dev_priv->evo);
+			return ret;
+		}
+	}
+
+	ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoVRAM, 0, 0x19,
+				  0, dev_priv->vram_size);
+	if (ret) {
+		nv50_evo_channel_del(&dev_priv->evo);
+		return ret;
+	}
+
+	return 0;
+}
+
+int
+nv50_evo_init(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	int ret;
+
+	if (!dev_priv->evo) {
+		ret = nv50_evo_create(dev);
+		if (ret)
+			return ret;
+	}
+
+	return nv50_evo_channel_init(dev_priv->evo);
+}
+
+void
+nv50_evo_fini(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->evo) {
+		nv50_evo_channel_fini(dev_priv->evo);
+		nv50_evo_channel_del(&dev_priv->evo);
+	}
+}
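
Splitting nv50_evo_create() (run once; the channel survives across display resets) from nv50_evo_channel_init()/fini() (run on every init/teardown cycle) is what lets nv50_display_init() above call a single nv50_evo_init() whether it is coming up cold or out of suspend. Condensed from the display hunks above, usage looks like:

/* Condensed from nv50_display_init()/nv50_display_disable() above. */
static int
example_display_up(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int ret;

	ret = nv50_evo_init(dev);	/* creates the channel on first call */
	if (ret)
		return ret;

	/* hand PDISPLAY the evo channel's object table */
	nv_wr32(dev, NV50_PDISPLAY_OBJECTS,
		(dev_priv->evo->ramin->vinst >> 8) | 9);
	return 0;
}

static void
example_display_down(struct drm_device *dev)
{
	nv50_evo_fini(dev);	/* quiesce the channel and delete it */
}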
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.h b/drivers/gpu/drm/nouveau/nv50_evo.h
index aae1334..aa4f0d3 100644
--- a/drivers/gpu/drm/nouveau/nv50_evo.h
+++ b/drivers/gpu/drm/nouveau/nv50_evo.h
@@ -24,6 +24,15 @@
  *
  */
 
+#ifndef __NV50_EVO_H__
+#define __NV50_EVO_H__
+
+int  nv50_evo_init(struct drm_device *dev);
+void nv50_evo_fini(struct drm_device *dev);
+int  nv50_evo_dmaobj_new(struct nouveau_channel *, u32 class, u32 name,
+			 u32 tile_flags, u32 magic_flags,
+			 u32 offset, u32 limit);
+
 #define NV50_EVO_UPDATE                                              0x00000080
 #define NV50_EVO_UNK84                                               0x00000084
 #define NV50_EVO_UNK84_NOTIFY                                        0x40000000
@@ -111,3 +120,4 @@
 #define NV50_EVO_CRTC_SCALE_RES1                                     0x000008d8
 #define NV50_EVO_CRTC_SCALE_RES2                                     0x000008dc
 
+#endif
diff --git a/drivers/gpu/drm/nouveau/nv50_fb.c b/drivers/gpu/drm/nouveau/nv50_fb.c
index cd1988b..50290de 100644
--- a/drivers/gpu/drm/nouveau/nv50_fb.c
+++ b/drivers/gpu/drm/nouveau/nv50_fb.c
@@ -3,30 +3,75 @@
 #include "nouveau_drv.h"
 #include "nouveau_drm.h"
 
+struct nv50_fb_priv {
+	struct page *r100c08_page;
+	dma_addr_t r100c08;
+};
+
+static int
+nv50_fb_create(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nv50_fb_priv *priv;
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+	if (!priv->r100c08_page) {
+		kfree(priv);
+		return -ENOMEM;
+	}
+
+	priv->r100c08 = pci_map_page(dev->pdev, priv->r100c08_page, 0,
+				     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+	if (pci_dma_mapping_error(dev->pdev, priv->r100c08)) {
+		__free_page(priv->r100c08_page);
+		kfree(priv);
+		return -EFAULT;
+	}
+
+	dev_priv->engine.fb.priv = priv;
+	return 0;
+}
+
 int
 nv50_fb_init(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nv50_fb_priv *priv;
+	int ret;
+
+	if (!dev_priv->engine.fb.priv) {
+		ret = nv50_fb_create(dev);
+		if (ret)
+			return ret;
+	}
+	priv = dev_priv->engine.fb.priv;
 
 	/* Not a clue what this is exactly.  Without pointing it at a
 	 * scratch page, VRAM->GART blits with M2MF (as in DDX DFS)
 	 * cause IOMMU "read from address 0" errors (rh#561267)
 	 */
-	nv_wr32(dev, 0x100c08, dev_priv->gart_info.sg_dummy_bus >> 8);
+	nv_wr32(dev, 0x100c08, priv->r100c08 >> 8);
 
 	/* This is needed to get meaningful information from 100c90
 	 * on traps. No idea what these values mean exactly. */
 	switch (dev_priv->chipset) {
 	case 0x50:
-		nv_wr32(dev, 0x100c90, 0x0707ff);
+		nv_wr32(dev, 0x100c90, 0x000707ff);
 		break;
 	case 0xa3:
 	case 0xa5:
 	case 0xa8:
-		nv_wr32(dev, 0x100c90, 0x0d0fff);
+		nv_wr32(dev, 0x100c90, 0x000d0fff);
+		break;
+	case 0xaf:
+		nv_wr32(dev, 0x100c90, 0x089d1fff);
 		break;
 	default:
-		nv_wr32(dev, 0x100c90, 0x1d07ff);
+		nv_wr32(dev, 0x100c90, 0x001d07ff);
 		break;
 	}
 
@@ -36,12 +81,25 @@
 void
 nv50_fb_takedown(struct drm_device *dev)
 {
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nv50_fb_priv *priv;
+
+	priv = dev_priv->engine.fb.priv;
+	if (!priv)
+		return;
+	dev_priv->engine.fb.priv = NULL;
+
+	pci_unmap_page(dev->pdev, priv->r100c08, PAGE_SIZE,
+		       PCI_DMA_BIDIRECTIONAL);
+	__free_page(priv->r100c08_page);
+	kfree(priv);
 }
 
 void
 nv50_fb_vm_trap(struct drm_device *dev, int display, const char *name)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	unsigned long flags;
 	u32 trap[6], idx, chinst;
 	int i, ch;
 
@@ -60,8 +118,10 @@
 		return;
 
 	chinst = (trap[2] << 16) | trap[1];
+
+	spin_lock_irqsave(&dev_priv->channels.lock, flags);
 	for (ch = 0; ch < dev_priv->engine.fifo.channels; ch++) {
-		struct nouveau_channel *chan = dev_priv->fifos[ch];
+		struct nouveau_channel *chan = dev_priv->channels.ptr[ch];
 
 		if (!chan || !chan->ramin)
 			continue;
@@ -69,6 +129,7 @@
 		if (chinst == chan->ramin->vinst >> 12)
 			break;
 	}
+	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
 
 	NV_INFO(dev, "%s - VM: Trapped %s at %02x%04x%04x status %08x "
 		     "channel %d (0x%08x)\n",
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index 6dcf048..6d38cb1 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -3,27 +3,20 @@
 #include "nouveau_dma.h"
 #include "nouveau_ramht.h"
 #include "nouveau_fbcon.h"
+#include "nouveau_mm.h"
 
-void
+int
 nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
 {
 	struct nouveau_fbdev *nfbdev = info->par;
 	struct drm_device *dev = nfbdev->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_channel *chan = dev_priv->channel;
+	int ret;
 
-	if (info->state != FBINFO_STATE_RUNNING)
-		return;
-
-	if (!(info->flags & FBINFO_HWACCEL_DISABLED) &&
-	     RING_SPACE(chan, rect->rop == ROP_COPY ? 7 : 11)) {
-		nouveau_fbcon_gpu_lockup(info);
-	}
-
-	if (info->flags & FBINFO_HWACCEL_DISABLED) {
-		cfb_fillrect(info, rect);
-		return;
-	}
+	ret = RING_SPACE(chan, rect->rop == ROP_COPY ? 7 : 11);
+	if (ret)
+		return ret;
 
 	if (rect->rop != ROP_COPY) {
 		BEGIN_RING(chan, NvSub2D, 0x02ac, 1);
@@ -45,27 +38,21 @@
 		OUT_RING(chan, 3);
 	}
 	FIRE_RING(chan);
+	return 0;
 }
 
-void
+int
 nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
 {
 	struct nouveau_fbdev *nfbdev = info->par;
 	struct drm_device *dev = nfbdev->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_channel *chan = dev_priv->channel;
+	int ret;
 
-	if (info->state != FBINFO_STATE_RUNNING)
-		return;
-
-	if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 12)) {
-		nouveau_fbcon_gpu_lockup(info);
-	}
-
-	if (info->flags & FBINFO_HWACCEL_DISABLED) {
-		cfb_copyarea(info, region);
-		return;
-	}
+	ret = RING_SPACE(chan, 12);
+	if (ret)
+		return ret;
 
 	BEGIN_RING(chan, NvSub2D, 0x0110, 1);
 	OUT_RING(chan, 0);
@@ -80,9 +67,10 @@
 	OUT_RING(chan, 0);
 	OUT_RING(chan, region->sy);
 	FIRE_RING(chan);
+	return 0;
 }
 
-void
+int
 nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
 {
 	struct nouveau_fbdev *nfbdev = info->par;
@@ -92,23 +80,14 @@
 	uint32_t width, dwords, *data = (uint32_t *)image->data;
 	uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel));
 	uint32_t *palette = info->pseudo_palette;
+	int ret;
 
-	if (info->state != FBINFO_STATE_RUNNING)
-		return;
+	if (image->depth != 1)
+		return -ENODEV;
 
-	if (image->depth != 1) {
-		cfb_imageblit(info, image);
-		return;
-	}
-
-	if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 11)) {
-		nouveau_fbcon_gpu_lockup(info);
-	}
-
-	if (info->flags & FBINFO_HWACCEL_DISABLED) {
-		cfb_imageblit(info, image);
-		return;
-	}
+	ret = RING_SPACE(chan, 11);
+	if (ret)
+		return ret;
 
 	width = ALIGN(image->width, 32);
 	dwords = (width * image->height) >> 5;
@@ -134,11 +113,9 @@
 	while (dwords) {
 		int push = dwords > 2047 ? 2047 : dwords;
 
-		if (RING_SPACE(chan, push + 1)) {
-			nouveau_fbcon_gpu_lockup(info);
-			cfb_imageblit(info, image);
-			return;
-		}
+		ret = RING_SPACE(chan, push + 1);
+		if (ret)
+			return ret;
 
 		dwords -= push;
 
@@ -148,6 +125,7 @@
 	}
 
 	FIRE_RING(chan);
+	return 0;
 }
 
 int
@@ -157,12 +135,9 @@
 	struct drm_device *dev = nfbdev->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_channel *chan = dev_priv->channel;
-	struct nouveau_gpuobj *eng2d = NULL;
-	uint64_t fb;
+	struct nouveau_bo *nvbo = nfbdev->nouveau_fb.nvbo;
 	int ret, format;
 
-	fb = info->fix.smem_start - dev_priv->fb_phys + dev_priv->vm_vram_base;
-
 	switch (info->var.bits_per_pixel) {
 	case 8:
 		format = 0xf3;
@@ -190,12 +165,7 @@
 		return -EINVAL;
 	}
 
-	ret = nouveau_gpuobj_gr_new(dev_priv->channel, 0x502d, &eng2d);
-	if (ret)
-		return ret;
-
-	ret = nouveau_ramht_insert(dev_priv->channel, Nv2D, eng2d);
-	nouveau_gpuobj_ref(NULL, &eng2d);
+	ret = nouveau_gpuobj_gr_new(dev_priv->channel, Nv2D, 0x502d);
 	if (ret)
 		return ret;
 
@@ -253,8 +223,8 @@
 	OUT_RING(chan, info->fix.line_length);
 	OUT_RING(chan, info->var.xres_virtual);
 	OUT_RING(chan, info->var.yres_virtual);
-	OUT_RING(chan, upper_32_bits(fb));
-	OUT_RING(chan, lower_32_bits(fb));
+	OUT_RING(chan, upper_32_bits(nvbo->vma.offset));
+	OUT_RING(chan, lower_32_bits(nvbo->vma.offset));
 	BEGIN_RING(chan, NvSub2D, 0x0230, 2);
 	OUT_RING(chan, format);
 	OUT_RING(chan, 1);
@@ -262,8 +232,8 @@
 	OUT_RING(chan, info->fix.line_length);
 	OUT_RING(chan, info->var.xres_virtual);
 	OUT_RING(chan, info->var.yres_virtual);
-	OUT_RING(chan, upper_32_bits(fb));
-	OUT_RING(chan, lower_32_bits(fb));
+	OUT_RING(chan, upper_32_bits(nvbo->vma.offset));
+	OUT_RING(chan, lower_32_bits(nvbo->vma.offset));
 
 	return 0;
 }
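
Turning the fillrect/copyarea/imageblit hooks from void into int moves the software-fallback policy out of every per-chipset file: the FBINFO_STATE_RUNNING / FBINFO_HWACCEL_DISABLED checks and cfb_*() calls deleted above presumably collapse into one wrapper per hook in nouveau_fbcon.c, which is not part of the hunks shown. An assumed sketch of that wrapper's shape:

static void
example_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
	int ret = -ENODEV;

	if (info->state == FBINFO_STATE_RUNNING &&
	    !(info->flags & FBINFO_HWACCEL_DISABLED))
		ret = nv50_fbcon_fillrect(info, rect);

	if (ret == 0)
		return;

	if (ret != -ENODEV)			/* ring error: stop trying accel */
		nouveau_fbcon_gpu_lockup(info);
	cfb_fillrect(info, rect);		/* software fallback */
}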
diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c
index 1da65bd..8dd04c5 100644
--- a/drivers/gpu/drm/nouveau/nv50_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv50_fifo.c
@@ -28,6 +28,7 @@
 #include "drm.h"
 #include "nouveau_drv.h"
 #include "nouveau_ramht.h"
+#include "nouveau_vm.h"
 
 static void
 nv50_fifo_playlist_update(struct drm_device *dev)
@@ -44,7 +45,8 @@
 
 	/* We never schedule channel 0 or 127 */
 	for (i = 1, nr = 0; i < 127; i++) {
-		if (dev_priv->fifos[i] && dev_priv->fifos[i]->ramfc) {
+		if (dev_priv->channels.ptr[i] &&
+		    dev_priv->channels.ptr[i]->ramfc) {
 			nv_wo32(cur, (nr * 4), i);
 			nr++;
 		}
@@ -60,7 +62,7 @@
 nv50_fifo_channel_enable(struct drm_device *dev, int channel)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_channel *chan = dev_priv->fifos[channel];
+	struct nouveau_channel *chan = dev_priv->channels.ptr[channel];
 	uint32_t inst;
 
 	NV_DEBUG(dev, "ch%d\n", channel);
@@ -105,6 +107,7 @@
 {
 	NV_DEBUG(dev, "\n");
 
+	nouveau_irq_register(dev, 8, nv04_fifo_isr);
 	nv_wr32(dev, NV03_PFIFO_INTR_0, 0xFFFFFFFF);
 	nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0xFFFFFFFF);
 }
@@ -118,7 +121,7 @@
 	NV_DEBUG(dev, "\n");
 
 	for (i = 0; i < NV50_PFIFO_CTX_TABLE__SIZE; i++) {
-		if (dev_priv->fifos[i])
+		if (dev_priv->channels.ptr[i])
 			nv50_fifo_channel_enable(dev, i);
 		else
 			nv50_fifo_channel_disable(dev, i);
@@ -206,6 +209,9 @@
 	if (!pfifo->playlist[0])
 		return;
 
+	nv_wr32(dev, 0x2140, 0x00000000);
+	nouveau_irq_unregister(dev, 8);
+
 	nouveau_gpuobj_ref(NULL, &pfifo->playlist[0]);
 	nouveau_gpuobj_ref(NULL, &pfifo->playlist[1]);
 }
@@ -256,6 +262,11 @@
 	}
 	ramfc = chan->ramfc;
 
+	chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
+			     NV50_USER(chan->id), PAGE_SIZE);
+	if (!chan->user)
+		return -ENOMEM;
+
 	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
 
 	nv_wo32(ramfc, 0x48, chan->pushbuf->cinst >> 4);
@@ -291,10 +302,23 @@
 nv50_fifo_destroy_context(struct nouveau_channel *chan)
 {
 	struct drm_device *dev = chan->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
 	struct nouveau_gpuobj *ramfc = NULL;
+	unsigned long flags;
 
 	NV_DEBUG(dev, "ch%d\n", chan->id);
 
+	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+	pfifo->reassign(dev, false);
+
+	/* Unload the context if it's the currently active one */
+	if (pfifo->channel_id(dev) == chan->id) {
+		pfifo->disable(dev);
+		pfifo->unload_context(dev);
+		pfifo->enable(dev);
+	}
+
 	/* This will ensure the channel is seen as disabled. */
 	nouveau_gpuobj_ref(chan->ramfc, &ramfc);
 	nouveau_gpuobj_ref(NULL, &chan->ramfc);
@@ -305,6 +329,14 @@
 		nv50_fifo_channel_disable(dev, 127);
 	nv50_fifo_playlist_update(dev);
 
+	pfifo->reassign(dev, true);
+	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+
+	/* Free the channel resources */
+	if (chan->user) {
+		iounmap(chan->user);
+		chan->user = NULL;
+	}
 	nouveau_gpuobj_ref(NULL, &ramfc);
 	nouveau_gpuobj_ref(NULL, &chan->cache);
 }
@@ -392,7 +424,7 @@
 	if (chid < 1 || chid >= dev_priv->engine.fifo.channels - 1)
 		return 0;
 
-	chan = dev_priv->fifos[chid];
+	chan = dev_priv->channels.ptr[chid];
 	if (!chan) {
 		NV_ERROR(dev, "Inactive channel on PFIFO: %d\n", chid);
 		return -EINVAL;
@@ -467,5 +499,5 @@
 void
 nv50_fifo_tlb_flush(struct drm_device *dev)
 {
-	nv50_vm_flush(dev, 5);
+	nv50_vm_flush_engine(dev, 5);
 }
diff --git a/drivers/gpu/drm/nouveau/nv50_gpio.c b/drivers/gpu/drm/nouveau/nv50_gpio.c
index b2fab2b..6b149c0 100644
--- a/drivers/gpu/drm/nouveau/nv50_gpio.c
+++ b/drivers/gpu/drm/nouveau/nv50_gpio.c
@@ -26,6 +26,28 @@
 #include "nouveau_drv.h"
 #include "nouveau_hw.h"
 
+#include "nv50_display.h"
+
+static void nv50_gpio_isr(struct drm_device *dev);
+static void nv50_gpio_isr_bh(struct work_struct *work);
+
+struct nv50_gpio_priv {
+	struct list_head handlers;
+	spinlock_t lock;
+};
+
+struct nv50_gpio_handler {
+	struct drm_device *dev;
+	struct list_head head;
+	struct work_struct work;
+	bool inhibit;
+
+	struct dcb_gpio_entry *gpio;
+
+	void (*handler)(void *data, int state);
+	void *data;
+};
+
 static int
 nv50_gpio_location(struct dcb_gpio_entry *gpio, uint32_t *reg, uint32_t *shift)
 {
@@ -75,29 +97,123 @@
 	return 0;
 }
 
+int
+nv50_gpio_irq_register(struct drm_device *dev, enum dcb_gpio_tag tag,
+		       void (*handler)(void *, int), void *data)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+	struct nv50_gpio_priv *priv = pgpio->priv;
+	struct nv50_gpio_handler *gpioh;
+	struct dcb_gpio_entry *gpio;
+	unsigned long flags;
+
+	gpio = nouveau_bios_gpio_entry(dev, tag);
+	if (!gpio)
+		return -ENOENT;
+
+	gpioh = kzalloc(sizeof(*gpioh), GFP_KERNEL);
+	if (!gpioh)
+		return -ENOMEM;
+
+	INIT_WORK(&gpioh->work, nv50_gpio_isr_bh);
+	gpioh->dev  = dev;
+	gpioh->gpio = gpio;
+	gpioh->handler = handler;
+	gpioh->data = data;
+
+	spin_lock_irqsave(&priv->lock, flags);
+	list_add(&gpioh->head, &priv->handlers);
+	spin_unlock_irqrestore(&priv->lock, flags);
+	return 0;
+}
+
 void
+nv50_gpio_irq_unregister(struct drm_device *dev, enum dcb_gpio_tag tag,
+			 void (*handler)(void *, int), void *data)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+	struct nv50_gpio_priv *priv = pgpio->priv;
+	struct nv50_gpio_handler *gpioh, *tmp;
+	struct dcb_gpio_entry *gpio;
+	unsigned long flags;
+
+	gpio = nouveau_bios_gpio_entry(dev, tag);
+	if (!gpio)
+		return;
+
+	spin_lock_irqsave(&priv->lock, flags);
+	list_for_each_entry_safe(gpioh, tmp, &priv->handlers, head) {
+		if (gpioh->gpio != gpio ||
+		    gpioh->handler != handler ||
+		    gpioh->data != data)
+			continue;
+		list_del(&gpioh->head);
+		kfree(gpioh);
+	}
+	spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+bool
 nv50_gpio_irq_enable(struct drm_device *dev, enum dcb_gpio_tag tag, bool on)
 {
 	struct dcb_gpio_entry *gpio;
 	u32 reg, mask;
 
 	gpio = nouveau_bios_gpio_entry(dev, tag);
-	if (!gpio) {
-		NV_ERROR(dev, "gpio tag 0x%02x not found\n", tag);
-		return;
-	}
+	if (!gpio)
+		return false;
 
 	reg  = gpio->line < 16 ? 0xe050 : 0xe070;
 	mask = 0x00010001 << (gpio->line & 0xf);
 
 	nv_wr32(dev, reg + 4, mask);
-	nv_mask(dev, reg + 0, mask, on ? mask : 0);
+	reg = nv_mask(dev, reg + 0, mask, on ? mask : 0);
+	return (reg & mask) == mask;
+}
+
+static int
+nv50_gpio_create(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+	struct nv50_gpio_priv *priv;
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&priv->handlers);
+	spin_lock_init(&priv->lock);
+	pgpio->priv = priv;
+	return 0;
+}
+
+static void
+nv50_gpio_destroy(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+
+	kfree(pgpio->priv);
+	pgpio->priv = NULL;
 }
 
 int
 nv50_gpio_init(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+	struct nv50_gpio_priv *priv;
+	int ret;
+
+	if (!pgpio->priv) {
+		ret = nv50_gpio_create(dev);
+		if (ret)
+			return ret;
+	}
+	priv = pgpio->priv;
 
 	/* disable, and ack any pending gpio interrupts */
 	nv_wr32(dev, 0xe050, 0x00000000);
@@ -107,5 +223,77 @@
 		nv_wr32(dev, 0xe074, 0xffffffff);
 	}
 
+	nouveau_irq_register(dev, 21, nv50_gpio_isr);
 	return 0;
 }
+
+void
+nv50_gpio_fini(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	nv_wr32(dev, 0xe050, 0x00000000);
+	if (dev_priv->chipset >= 0x90)
+		nv_wr32(dev, 0xe070, 0x00000000);
+	nouveau_irq_unregister(dev, 21);
+
+	nv50_gpio_destroy(dev);
+}
+
+static void
+nv50_gpio_isr_bh(struct work_struct *work)
+{
+	struct nv50_gpio_handler *gpioh =
+		container_of(work, struct nv50_gpio_handler, work);
+	struct drm_nouveau_private *dev_priv = gpioh->dev->dev_private;
+	struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+	struct nv50_gpio_priv *priv = pgpio->priv;
+	unsigned long flags;
+	int state;
+
+	state = pgpio->get(gpioh->dev, gpioh->gpio->tag);
+	if (state < 0)
+		return;
+
+	gpioh->handler(gpioh->data, state);
+
+	spin_lock_irqsave(&priv->lock, flags);
+	gpioh->inhibit = false;
+	spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static void
+nv50_gpio_isr(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+	struct nv50_gpio_priv *priv = pgpio->priv;
+	struct nv50_gpio_handler *gpioh;
+	u32 intr0, intr1 = 0;
+	u32 hi, lo, ch;
+
+	intr0 = nv_rd32(dev, 0xe054) & nv_rd32(dev, 0xe050);
+	if (dev_priv->chipset >= 0x90)
+		intr1 = nv_rd32(dev, 0xe074) & nv_rd32(dev, 0xe070);
+
+	hi = (intr0 & 0x0000ffff) | (intr1 << 16);
+	lo = (intr0 >> 16) | (intr1 & 0xffff0000);
+	ch = hi | lo;
+
+	nv_wr32(dev, 0xe054, intr0);
+	if (dev_priv->chipset >= 0x90)
+		nv_wr32(dev, 0xe074, intr1);
+
+	spin_lock(&priv->lock);
+	list_for_each_entry(gpioh, &priv->handlers, head) {
+		if (!(ch & (1 << gpioh->gpio->line)))
+			continue;
+
+		if (gpioh->inhibit)
+			continue;
+		gpioh->inhibit = true;
+
+		queue_work(dev_priv->wq, &gpioh->work);
+	}
+	spin_unlock(&priv->lock);
+}
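
With the register/unregister API above, consumers no longer poke the 0xe05x registers themselves: they hand the GPIO engine a callback and the bottom half delivers the line state sampled via pgpio->get(). The inhibit flag keeps a line that fires again while its work item is still queued from being queued twice; the bottom half re-arms it after the callback returns. A sketch of a consumer (the handler body is illustrative; the tag comes from the DCB table, as in the hotplug loop in nv50_display_init()):

static void
example_hpd_handler(void *data, int state)
{
	struct drm_connector *connector = data;

	/* state is pgpio->get() sampled in the bottom half */
	NV_INFO(connector->dev, "%s %splugged\n",
		drm_get_connector_name(connector), state ? "" : "un");
}

static int
example_register_hpd(struct drm_device *dev, struct drm_connector *connector)
{
	struct nouveau_connector *conn = nouveau_connector(connector);

	if (conn->dcb->gpio_tag == 0xff)	/* no hotplug gpio */
		return -ENOENT;

	return nv50_gpio_irq_register(dev, conn->dcb->gpio_tag,
				      example_hpd_handler, connector);
}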
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index 8b669d0..c510e74 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -29,6 +29,12 @@
 #include "nouveau_drv.h"
 #include "nouveau_ramht.h"
 #include "nouveau_grctx.h"
+#include "nouveau_dma.h"
+#include "nouveau_vm.h"
+#include "nv50_evo.h"
+
+static int  nv50_graph_register(struct drm_device *);
+static void nv50_graph_isr(struct drm_device *);
 
 static void
 nv50_graph_init_reset(struct drm_device *dev)
@@ -46,6 +52,7 @@
 {
 	NV_DEBUG(dev, "\n");
 
+	nouveau_irq_register(dev, 12, nv50_graph_isr);
 	nv_wr32(dev, NV03_PGRAPH_INTR, 0xffffffff);
 	nv_wr32(dev, 0x400138, 0xffffffff);
 	nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xffffffff);
@@ -145,12 +152,15 @@
 	nv50_graph_init_reset(dev);
 	nv50_graph_init_regs__nv(dev);
 	nv50_graph_init_regs(dev);
-	nv50_graph_init_intr(dev);
 
 	ret = nv50_graph_init_ctxctl(dev);
 	if (ret)
 		return ret;
 
+	ret = nv50_graph_register(dev);
+	if (ret)
+		return ret;
+	nv50_graph_init_intr(dev);
 	return 0;
 }
 
@@ -158,6 +168,8 @@
 nv50_graph_takedown(struct drm_device *dev)
 {
 	NV_DEBUG(dev, "\n");
+	nv_wr32(dev, 0x40013c, 0x00000000);
+	nouveau_irq_unregister(dev, 12);
 }
 
 void
@@ -190,7 +202,7 @@
 	inst = (inst & NV50_PGRAPH_CTXCTL_CUR_INSTANCE) << 12;
 
 	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
-		struct nouveau_channel *chan = dev_priv->fifos[i];
+		struct nouveau_channel *chan = dev_priv->channels.ptr[i];
 
 		if (chan && chan->ramin && chan->ramin->vinst == inst)
 			return chan;
@@ -211,7 +223,7 @@
 
 	NV_DEBUG(dev, "ch%d\n", chan->id);
 
-	ret = nouveau_gpuobj_new(dev, chan, pgraph->grctx_size, 0x1000,
+	ret = nouveau_gpuobj_new(dev, chan, pgraph->grctx_size, 0,
 				 NVOBJ_FLAG_ZERO_ALLOC |
 				 NVOBJ_FLAG_ZERO_FREE, &chan->ramin_grctx);
 	if (ret)
@@ -234,6 +246,7 @@
 	nv_wo32(chan->ramin_grctx, 0x00000, chan->ramin->vinst >> 12);
 
 	dev_priv->engine.instmem.flush(dev);
+	atomic_inc(&chan->vm->pgraph_refs);
 	return 0;
 }
 
@@ -242,18 +255,31 @@
 {
 	struct drm_device *dev = chan->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
 	int i, hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20;
+	unsigned long flags;
 
 	NV_DEBUG(dev, "ch%d\n", chan->id);
 
 	if (!chan->ramin)
 		return;
 
+	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+	pgraph->fifo_access(dev, false);
+
+	if (pgraph->channel(dev) == chan)
+		pgraph->unload_context(dev);
+
 	for (i = hdr; i < hdr + 24; i += 4)
 		nv_wo32(chan->ramin, i, 0);
 	dev_priv->engine.instmem.flush(dev);
 
+	pgraph->fifo_access(dev, true);
+	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+
 	nouveau_gpuobj_ref(NULL, &chan->ramin_grctx);
+
+	atomic_dec(&chan->vm->pgraph_refs);
 }
 
 static int
@@ -306,7 +332,7 @@
 	return 0;
 }
 
-void
+static void
 nv50_graph_context_switch(struct drm_device *dev)
 {
 	uint32_t inst;
@@ -322,8 +348,8 @@
 }
 
 static int
-nv50_graph_nvsw_dma_vblsem(struct nouveau_channel *chan, int grclass,
-			   int mthd, uint32_t data)
+nv50_graph_nvsw_dma_vblsem(struct nouveau_channel *chan,
+			   u32 class, u32 mthd, u32 data)
 {
 	struct nouveau_gpuobj *gpuobj;
 
@@ -340,8 +366,8 @@
 }
 
 static int
-nv50_graph_nvsw_vblsem_offset(struct nouveau_channel *chan, int grclass,
-			      int mthd, uint32_t data)
+nv50_graph_nvsw_vblsem_offset(struct nouveau_channel *chan,
+			      u32 class, u32 mthd, u32 data)
 {
 	if (nouveau_notifier_offset(chan->nvsw.vblsem, &data))
 		return -ERANGE;
@@ -351,16 +377,16 @@
 }
 
 static int
-nv50_graph_nvsw_vblsem_release_val(struct nouveau_channel *chan, int grclass,
-				   int mthd, uint32_t data)
+nv50_graph_nvsw_vblsem_release_val(struct nouveau_channel *chan,
+				   u32 class, u32 mthd, u32 data)
 {
 	chan->nvsw.vblsem_rval = data;
 	return 0;
 }
 
 static int
-nv50_graph_nvsw_vblsem_release(struct nouveau_channel *chan, int grclass,
-			       int mthd, uint32_t data)
+nv50_graph_nvsw_vblsem_release(struct nouveau_channel *chan,
+			       u32 class, u32 mthd, u32 data)
 {
 	struct drm_device *dev = chan->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -368,45 +394,85 @@
 	if (!chan->nvsw.vblsem || chan->nvsw.vblsem_offset == ~0 || data > 1)
 		return -EINVAL;
 
-	if (!(nv_rd32(dev, NV50_PDISPLAY_INTR_EN) &
-		      NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_(data))) {
-		nv_wr32(dev, NV50_PDISPLAY_INTR_1,
-			NV50_PDISPLAY_INTR_1_VBLANK_CRTC_(data));
-		nv_wr32(dev, NV50_PDISPLAY_INTR_EN, nv_rd32(dev,
-			NV50_PDISPLAY_INTR_EN) |
-			NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_(data));
-	}
+	drm_vblank_get(dev, data);
 
+	chan->nvsw.vblsem_head = data;
 	list_add(&chan->nvsw.vbl_wait, &dev_priv->vbl_waiting);
+
 	return 0;
 }
 
-static struct nouveau_pgraph_object_method nv50_graph_nvsw_methods[] = {
-	{ 0x018c, nv50_graph_nvsw_dma_vblsem },
-	{ 0x0400, nv50_graph_nvsw_vblsem_offset },
-	{ 0x0404, nv50_graph_nvsw_vblsem_release_val },
-	{ 0x0408, nv50_graph_nvsw_vblsem_release },
-	{}
-};
+static int
+nv50_graph_nvsw_mthd_page_flip(struct nouveau_channel *chan,
+			       u32 class, u32 mthd, u32 data)
+{
+	struct nouveau_page_flip_state s;
 
-struct nouveau_pgraph_object_class nv50_graph_grclass[] = {
-	{ 0x506e, true, nv50_graph_nvsw_methods }, /* nvsw */
-	{ 0x0030, false, NULL }, /* null */
-	{ 0x5039, false, NULL }, /* m2mf */
-	{ 0x502d, false, NULL }, /* 2d */
-	{ 0x50c0, false, NULL }, /* compute */
-	{ 0x85c0, false, NULL }, /* compute (nva3, nva5, nva8) */
-	{ 0x5097, false, NULL }, /* tesla (nv50) */
-	{ 0x8297, false, NULL }, /* tesla (nv8x/nv9x) */
-	{ 0x8397, false, NULL }, /* tesla (nva0, nvaa, nvac) */
-	{ 0x8597, false, NULL }, /* tesla (nva3, nva5, nva8) */
-	{}
-};
+	if (!nouveau_finish_page_flip(chan, &s)) {
+		/* XXX - Do something here */
+	}
+
+	return 0;
+}
+
+static int
+nv50_graph_register(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->engine.graph.registered)
+		return 0;
+
+	NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
+	NVOBJ_MTHD (dev, 0x506e, 0x018c, nv50_graph_nvsw_dma_vblsem);
+	NVOBJ_MTHD (dev, 0x506e, 0x0400, nv50_graph_nvsw_vblsem_offset);
+	NVOBJ_MTHD (dev, 0x506e, 0x0404, nv50_graph_nvsw_vblsem_release_val);
+	NVOBJ_MTHD (dev, 0x506e, 0x0408, nv50_graph_nvsw_vblsem_release);
+	NVOBJ_MTHD (dev, 0x506e, 0x0500, nv50_graph_nvsw_mthd_page_flip);
+
+	NVOBJ_CLASS(dev, 0x0030, GR); /* null */
+	NVOBJ_CLASS(dev, 0x5039, GR); /* m2mf */
+	NVOBJ_CLASS(dev, 0x502d, GR); /* 2d */
+
+	/* tesla */
+	if (dev_priv->chipset == 0x50)
+		NVOBJ_CLASS(dev, 0x5097, GR); /* tesla (nv50) */
+	else
+	if (dev_priv->chipset < 0xa0)
+		NVOBJ_CLASS(dev, 0x8297, GR); /* tesla (nv8x/nv9x) */
+	else {
+		switch (dev_priv->chipset) {
+		case 0xa0:
+		case 0xaa:
+		case 0xac:
+			NVOBJ_CLASS(dev, 0x8397, GR);
+			break;
+		case 0xa3:
+		case 0xa5:
+		case 0xa8:
+			NVOBJ_CLASS(dev, 0x8597, GR);
+			break;
+		case 0xaf:
+			NVOBJ_CLASS(dev, 0x8697, GR);
+			break;
+		}
+	}
+
+	/* compute */
+	NVOBJ_CLASS(dev, 0x50c0, GR);
+	if (dev_priv->chipset  > 0xa0 &&
+	    dev_priv->chipset != 0xaa &&
+	    dev_priv->chipset != 0xac)
+		NVOBJ_CLASS(dev, 0x85c0, GR);
+
+	dev_priv->engine.graph.registered = true;
+	return 0;
+}
 
 void
 nv50_graph_tlb_flush(struct drm_device *dev)
 {
-	nv50_vm_flush(dev, 0);
+	nv50_vm_flush_engine(dev, 0);
 }
 
 void
@@ -449,8 +515,500 @@
 			 nv_rd32(dev, 0x400384), nv_rd32(dev, 0x400388));
 	}
 
-	nv50_vm_flush(dev, 0);
+	nv50_vm_flush_engine(dev, 0);
 
 	nv_mask(dev, 0x400500, 0x00000001, 0x00000001);
 	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
 }
+
+static struct nouveau_enum nv50_mp_exec_error_names[] = {
+	{ 3, "STACK_UNDERFLOW" },
+	{ 4, "QUADON_ACTIVE" },
+	{ 8, "TIMEOUT" },
+	{ 0x10, "INVALID_OPCODE" },
+	{ 0x40, "BREAKPOINT" },
+	{}
+};
+
+static struct nouveau_bitfield nv50_graph_trap_m2mf[] = {
+	{ 0x00000001, "NOTIFY" },
+	{ 0x00000002, "IN" },
+	{ 0x00000004, "OUT" },
+	{}
+};
+
+static struct nouveau_bitfield nv50_graph_trap_vfetch[] = {
+	{ 0x00000001, "FAULT" },
+	{}
+};
+
+static struct nouveau_bitfield nv50_graph_trap_strmout[] = {
+	{ 0x00000001, "FAULT" },
+	{}
+};
+
+static struct nouveau_bitfield nv50_graph_trap_ccache[] = {
+	{ 0x00000001, "FAULT" },
+	{}
+};
+
+/* There must be a *lot* of these. Will take some time to gather them up. */
+static struct nouveau_enum nv50_data_error_names[] = {
+	{ 4,	"INVALID_VALUE" },
+	{ 5,	"INVALID_ENUM" },
+	{ 8,	"INVALID_OBJECT" },
+	{ 0xc,	"INVALID_BITFIELD" },
+	{ 0x28,	"MP_NO_REG_SPACE" },
+	{ 0x2b,	"MP_BLOCK_SIZE_MISMATCH" },
+	{}
+};
+
+static struct nouveau_bitfield nv50_graph_intr[] = {
+	{ 0x00000001, "NOTIFY" },
+	{ 0x00000002, "COMPUTE_QUERY" },
+	{ 0x00000010, "ILLEGAL_MTHD" },
+	{ 0x00000020, "ILLEGAL_CLASS" },
+	{ 0x00000040, "DOUBLE_NOTIFY" },
+	{ 0x00001000, "CONTEXT_SWITCH" },
+	{ 0x00010000, "BUFFER_NOTIFY" },
+	{ 0x00100000, "DATA_ERROR" },
+	{ 0x00200000, "TRAP" },
+	{ 0x01000000, "SINGLE_STEP" },
+	{}
+};
+
+static void
+nv50_pgraph_mp_trap(struct drm_device *dev, int tpid, int display)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	uint32_t units = nv_rd32(dev, 0x1540);
+	uint32_t addr, mp10, status, pc, oplow, ophigh;
+	int i;
+	int mps = 0;
+	for (i = 0; i < 4; i++) {
+		if (!(units & (1 << (i + 24))))
+			continue;
+		if (dev_priv->chipset < 0xa0)
+			addr = 0x408200 + (tpid << 12) + (i << 7);
+		else
+			addr = 0x408100 + (tpid << 11) + (i << 7);
+		mp10 = nv_rd32(dev, addr + 0x10);
+		status = nv_rd32(dev, addr + 0x14);
+		if (!status)
+			continue;
+		if (display) {
+			nv_rd32(dev, addr + 0x20);
+			pc = nv_rd32(dev, addr + 0x24);
+			oplow = nv_rd32(dev, addr + 0x70);
+			ophigh = nv_rd32(dev, addr + 0x74);
+			NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - "
+					"TP %d MP %d: ", tpid, i);
+			nouveau_enum_print(nv50_mp_exec_error_names, status);
+			printk(" at %06x warp %d, opcode %08x %08x\n",
+					pc & 0xffffff, pc >> 24,
+					oplow, ophigh);
+		}
+		nv_wr32(dev, addr + 0x10, mp10);
+		nv_wr32(dev, addr + 0x14, 0);
+		mps++;
+	}
+	if (!mps && display)
+		NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - TP %d: "
+				"No MPs claiming errors?\n", tpid);
+}
+
+static void
+nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old,
+		uint32_t ustatus_new, int display, const char *name)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	int tps = 0;
+	uint32_t units = nv_rd32(dev, 0x1540);
+	int i, r;
+	uint32_t ustatus_addr, ustatus;
+	for (i = 0; i < 16; i++) {
+		if (!(units & (1 << i)))
+			continue;
+		if (dev_priv->chipset < 0xa0)
+			ustatus_addr = ustatus_old + (i << 12);
+		else
+			ustatus_addr = ustatus_new + (i << 11);
+		ustatus = nv_rd32(dev, ustatus_addr) & 0x7fffffff;
+		if (!ustatus)
+			continue;
+		tps++;
+		switch (type) {
+		case 6: /* texture error... unknown for now */
+			nv50_fb_vm_trap(dev, display, name);
+			if (display) {
+				NV_ERROR(dev, "magic set %d:\n", i);
+				for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4)
+					NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
+						nv_rd32(dev, r));
+			}
+			break;
+		case 7: /* MP error */
+			if (ustatus & 0x00010000) {
+				nv50_pgraph_mp_trap(dev, i, display);
+				ustatus &= ~0x00010000;
+			}
+			break;
+		case 8: /* TPDMA error */
+			{
+			uint32_t e0c = nv_rd32(dev, ustatus_addr + 4);
+			uint32_t e10 = nv_rd32(dev, ustatus_addr + 8);
+			uint32_t e14 = nv_rd32(dev, ustatus_addr + 0xc);
+			uint32_t e18 = nv_rd32(dev, ustatus_addr + 0x10);
+			uint32_t e1c = nv_rd32(dev, ustatus_addr + 0x14);
+			uint32_t e20 = nv_rd32(dev, ustatus_addr + 0x18);
+			uint32_t e24 = nv_rd32(dev, ustatus_addr + 0x1c);
+			nv50_fb_vm_trap(dev, display, name);
+			/* 2d engine destination */
+			if (ustatus & 0x00000010) {
+				if (display) {
+					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - Unknown fault at address %02x%08x\n",
+							i, e14, e10);
+					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
+							i, e0c, e18, e1c, e20, e24);
+				}
+				ustatus &= ~0x00000010;
+			}
+			/* Render target */
+			if (ustatus & 0x00000040) {
+				if (display) {
+					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - Unknown fault at address %02x%08x\n",
+							i, e14, e10);
+					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
+							i, e0c, e18, e1c, e20, e24);
+				}
+				ustatus &= ~0x00000040;
+			}
+			/* CUDA memory: l[], g[] or stack. */
+			if (ustatus & 0x00000080) {
+				if (display) {
+					if (e18 & 0x80000000) {
+						/* g[] read fault? */
+						NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global read fault at address %02x%08x\n",
+								i, e14, e10 | ((e18 >> 24) & 0x1f));
+						e18 &= ~0x1f000000;
+					} else if (e18 & 0xc) {
+						/* g[] write fault? */
+						NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global write fault at address %02x%08x\n",
+								i, e14, e10 | ((e18 >> 7) & 0x1f));
+						e18 &= ~0x00000f80;
+					} else {
+						NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Unknown CUDA fault at address %02x%08x\n",
+								i, e14, e10);
+					}
+					NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
+							i, e0c, e18, e1c, e20, e24);
+				}
+				ustatus &= ~0x00000080;
+			}
+			}
+			break;
+		}
+		if (ustatus) {
+			if (display)
+				NV_INFO(dev, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus);
+		}
+		nv_wr32(dev, ustatus_addr, 0xc0000000);
+	}
+
+	if (!tps && display)
+		NV_INFO(dev, "%s - No TPs claiming errors?\n", name);
+}
+
+static int
+nv50_pgraph_trap_handler(struct drm_device *dev, u32 display, u64 inst, u32 chid)
+{
+	u32 status = nv_rd32(dev, 0x400108);
+	u32 ustatus;
+
+	if (!status && display) {
+		NV_INFO(dev, "PGRAPH - TRAP: no units reporting traps?\n");
+		return 1;
+	}
+
+	/* DISPATCH: Relays commands to other units and handles NOTIFY,
+	 * COND, QUERY. If you get a trap from it, the command is still stuck
+	 * in DISPATCH and you need to do something about it. */
+	if (status & 0x001) {
+		ustatus = nv_rd32(dev, 0x400804) & 0x7fffffff;
+		if (!ustatus && display) {
+			NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - no ustatus?\n");
+		}
+
+		nv_wr32(dev, 0x400500, 0x00000000);
+
+		/* Known to be triggered by screwed up NOTIFY and COND... */
+		if (ustatus & 0x00000001) {
+			u32 addr = nv_rd32(dev, 0x400808);
+			u32 subc = (addr & 0x00070000) >> 16;
+			u32 mthd = (addr & 0x00001ffc);
+			u32 datal = nv_rd32(dev, 0x40080c);
+			u32 datah = nv_rd32(dev, 0x400810);
+			u32 class = nv_rd32(dev, 0x400814);
+			u32 r848 = nv_rd32(dev, 0x400848);
+
+			NV_INFO(dev, "PGRAPH - TRAP DISPATCH_FAULT\n");
+			if (display && (addr & 0x80000000)) {
+				NV_INFO(dev, "PGRAPH - ch %d (0x%010llx) "
+					     "subc %d class 0x%04x mthd 0x%04x "
+					     "data 0x%08x%08x "
+					     "400808 0x%08x 400848 0x%08x\n",
+					chid, inst, subc, class, mthd, datah,
+					datal, addr, r848);
+			} else
+			if (display) {
+				NV_INFO(dev, "PGRAPH - no stuck command?\n");
+			}
+
+			nv_wr32(dev, 0x400808, 0);
+			nv_wr32(dev, 0x4008e8, nv_rd32(dev, 0x4008e8) & 3);
+			nv_wr32(dev, 0x400848, 0);
+			ustatus &= ~0x00000001;
+		}
+
+		if (ustatus & 0x00000002) {
+			u32 addr = nv_rd32(dev, 0x40084c);
+			u32 subc = (addr & 0x00070000) >> 16;
+			u32 mthd = (addr & 0x00001ffc);
+			u32 data = nv_rd32(dev, 0x40085c);
+			u32 class = nv_rd32(dev, 0x400814);
+
+			NV_INFO(dev, "PGRAPH - TRAP DISPATCH_QUERY\n");
+			if (display && (addr & 0x80000000)) {
+				NV_INFO(dev, "PGRAPH - ch %d (0x%010llx) "
+					     "subc %d class 0x%04x mthd 0x%04x "
+					     "data 0x%08x 40084c 0x%08x\n",
+					chid, inst, subc, class, mthd,
+					data, addr);
+			} else
+			if (display) {
+				NV_INFO(dev, "PGRAPH - no stuck command?\n");
+			}
+
+			nv_wr32(dev, 0x40084c, 0);
+			ustatus &= ~0x00000002;
+		}
+
+		if (ustatus && display) {
+			NV_INFO(dev, "PGRAPH - TRAP_DISPATCH (unknown "
+				      "0x%08x)\n", ustatus);
+		}
+
+		nv_wr32(dev, 0x400804, 0xc0000000);
+		nv_wr32(dev, 0x400108, 0x001);
+		status &= ~0x001;
+		if (!status)
+			return 0;
+	}
+
+	/* M2MF: Memory to memory copy engine. */
+	if (status & 0x002) {
+		u32 ustatus = nv_rd32(dev, 0x406800) & 0x7fffffff;
+		if (display) {
+			NV_INFO(dev, "PGRAPH - TRAP_M2MF");
+			nouveau_bitfield_print(nv50_graph_trap_m2mf, ustatus);
+			printk("\n");
+			NV_INFO(dev, "PGRAPH - TRAP_M2MF %08x %08x %08x %08x\n",
+				nv_rd32(dev, 0x406804), nv_rd32(dev, 0x406808),
+				nv_rd32(dev, 0x40680c), nv_rd32(dev, 0x406810));
+		}
+
+		/* No sane way found yet -- just reset the bugger. */
+		nv_wr32(dev, 0x400040, 2);
+		nv_wr32(dev, 0x400040, 0);
+		nv_wr32(dev, 0x406800, 0xc0000000);
+		nv_wr32(dev, 0x400108, 0x002);
+		status &= ~0x002;
+	}
+
+	/* VFETCH: Fetches data from vertex buffers. */
+	if (status & 0x004) {
+		u32 ustatus = nv_rd32(dev, 0x400c04) & 0x7fffffff;
+		if (display) {
+			NV_INFO(dev, "PGRAPH - TRAP_VFETCH");
+			nouveau_bitfield_print(nv50_graph_trap_vfetch, ustatus);
+			printk("\n");
+			NV_INFO(dev, "PGRAPH - TRAP_VFETCH %08x %08x %08x %08x\n",
+				nv_rd32(dev, 0x400c00), nv_rd32(dev, 0x400c08),
+				nv_rd32(dev, 0x400c0c), nv_rd32(dev, 0x400c10));
+		}
+
+		nv_wr32(dev, 0x400c04, 0xc0000000);
+		nv_wr32(dev, 0x400108, 0x004);
+		status &= ~0x004;
+	}
+
+	/* STRMOUT: DirectX streamout / OpenGL transform feedback. */
+	if (status & 0x008) {
+		ustatus = nv_rd32(dev, 0x401800) & 0x7fffffff;
+		if (display) {
+			NV_INFO(dev, "PGRAPH - TRAP_STRMOUT");
+			nouveau_bitfield_print(nv50_graph_trap_strmout, ustatus);
+			printk("\n");
+			NV_INFO(dev, "PGRAPH - TRAP_STRMOUT %08x %08x %08x %08x\n",
+				nv_rd32(dev, 0x401804), nv_rd32(dev, 0x401808),
+				nv_rd32(dev, 0x40180c), nv_rd32(dev, 0x401810));
+		}
+
+		/* No sane way found yet -- just reset the bugger. */
+		nv_wr32(dev, 0x400040, 0x80);
+		nv_wr32(dev, 0x400040, 0);
+		nv_wr32(dev, 0x401800, 0xc0000000);
+		nv_wr32(dev, 0x400108, 0x008);
+		status &= ~0x008;
+	}
+
+	/* CCACHE: Handles code and c[] caches and fills them. */
+	if (status & 0x010) {
+		ustatus = nv_rd32(dev, 0x405018) & 0x7fffffff;
+		if (display) {
+			NV_INFO(dev, "PGRAPH - TRAP_CCACHE");
+			nouveau_bitfield_print(nv50_graph_trap_ccache, ustatus);
+			printk("\n");
+			NV_INFO(dev, "PGRAPH - TRAP_CCACHE %08x %08x %08x %08x"
+				     " %08x %08x %08x\n",
+				nv_rd32(dev, 0x405800), nv_rd32(dev, 0x405804),
+				nv_rd32(dev, 0x405808), nv_rd32(dev, 0x40580c),
+				nv_rd32(dev, 0x405810), nv_rd32(dev, 0x405814),
+				nv_rd32(dev, 0x40581c));
+		}
+
+		nv_wr32(dev, 0x405018, 0xc0000000);
+		nv_wr32(dev, 0x400108, 0x010);
+		status &= ~0x010;
+	}
+
+	/* Unknown, not seen yet... 0x402000 is the only trap status reg
+	 * remaining, so try to handle it anyway. Perhaps related to that
+	 * unknown DMA slot on tesla? */
+	if (status & 0x20) {
+		ustatus = nv_rd32(dev, 0x402000) & 0x7fffffff;
+		if (display)
+			NV_INFO(dev, "PGRAPH - TRAP_UNKC04 0x%08x\n", ustatus);
+		nv_wr32(dev, 0x402000, 0xc0000000);
+		/* no status modification on purpose */
+	}
+
+	/* TEXTURE: CUDA texturing units */
+	if (status & 0x040) {
+		nv50_pgraph_tp_trap(dev, 6, 0x408900, 0x408600, display,
+				    "PGRAPH - TRAP_TEXTURE");
+		nv_wr32(dev, 0x400108, 0x040);
+		status &= ~0x040;
+	}
+
+	/* MP: CUDA execution engines. */
+	if (status & 0x080) {
+		nv50_pgraph_tp_trap(dev, 7, 0x408314, 0x40831c, display,
+				    "PGRAPH - TRAP_MP");
+		nv_wr32(dev, 0x400108, 0x080);
+		status &= ~0x080;
+	}
+
+	/* TPDMA:  Handles TP-initiated uncached memory accesses:
+	 * l[], g[], stack, 2d surfaces, render targets. */
+	if (status & 0x100) {
+		nv50_pgraph_tp_trap(dev, 8, 0x408e08, 0x408708, display,
+				    "PGRAPH - TRAP_TPDMA");
+		nv_wr32(dev, 0x400108, 0x100);
+		status &= ~0x100;
+	}
+
+	if (status) {
+		if (display)
+			NV_INFO(dev, "PGRAPH - TRAP: unknown 0x%08x\n", status);
+		nv_wr32(dev, 0x400108, status);
+	}
+
+	return 1;
+}
+
+static int
+nv50_graph_isr_chid(struct drm_device *dev, u64 inst)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_channel *chan;
+	unsigned long flags;
+	int i;
+
+	spin_lock_irqsave(&dev_priv->channels.lock, flags);
+	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
+		chan = dev_priv->channels.ptr[i];
+		if (!chan || !chan->ramin)
+			continue;
+
+		if (inst == chan->ramin->vinst)
+			break;
+	}
+	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
+	return i;
+}
+
+static void
+nv50_graph_isr(struct drm_device *dev)
+{
+	u32 stat;
+
+	while ((stat = nv_rd32(dev, 0x400100))) {
+		u64 inst = (u64)(nv_rd32(dev, 0x40032c) & 0x0fffffff) << 12;
+		u32 chid = nv50_graph_isr_chid(dev, inst);
+		u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
+		u32 subc = (addr & 0x00070000) >> 16;
+		u32 mthd = (addr & 0x00001ffc);
+		u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
+		u32 class = nv_rd32(dev, 0x400814);
+		u32 show = stat;
+
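+		/* ILLEGAL_MTHD: give any registered software method a
+		 * chance to handle it before we complain */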
+		if (stat & 0x00000010) {
+			if (!nouveau_gpuobj_mthd_call2(dev, chid, class,
+						       mthd, data))
+				show &= ~0x00000010;
+		}
+
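+		/* CONTEXT_SWITCH: disable PGRAPH fifo access, ack the
+		 * interrupt and perform the switch ourselves */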
+		if (stat & 0x00001000) {
+			nv_wr32(dev, 0x400500, 0x00000000);
+			nv_wr32(dev, 0x400100, 0x00001000);
+			nv_mask(dev, 0x40013c, 0x00001000, 0x00000000);
+			nv50_graph_context_switch(dev);
+			stat &= ~0x00001000;
+			show &= ~0x00001000;
+		}
+
+		show = (show && nouveau_ratelimit()) ? show : 0;
+
+		if (show & 0x00100000) {
+			u32 ecode = nv_rd32(dev, 0x400110);
+			NV_INFO(dev, "PGRAPH - DATA_ERROR ");
+			nouveau_enum_print(nv50_data_error_names, ecode);
+			printk("\n");
+		}
+
+		if (stat & 0x00200000) {
+			if (!nv50_pgraph_trap_handler(dev, show, inst, chid))
+				show &= ~0x00200000;
+		}
+
+		nv_wr32(dev, 0x400100, stat);
+		nv_wr32(dev, 0x400500, 0x00010001);
+
+		if (show) {
+			NV_INFO(dev, "PGRAPH -");
+			nouveau_bitfield_print(nv50_graph_intr, show);
+			printk("\n");
+			NV_INFO(dev, "PGRAPH - ch %d (0x%010llx) subc %d "
+				     "class 0x%04x mthd 0x%04x data 0x%08x\n",
+				chid, inst, subc, class, mthd, data);
+		}
+	}
+
+	if (nv_rd32(dev, 0x400824) & (1 << 31))
+		nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31));
+}
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
index b773229..adac4da 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -27,14 +27,20 @@
 
 #include "drmP.h"
 #include "drm.h"
+
 #include "nouveau_drv.h"
+#include "nouveau_vm.h"
+
+#define BAR1_VM_BASE 0x0020000000ULL
+#define BAR1_VM_SIZE pci_resource_len(dev->pdev, 1)
+#define BAR3_VM_BASE 0x0000000000ULL
+#define BAR3_VM_SIZE pci_resource_len(dev->pdev, 3)
 
 struct nv50_instmem_priv {
 	uint32_t save1700[5]; /* 0x1700->0x1710 */
 
-	struct nouveau_gpuobj *pramin_pt;
-	struct nouveau_gpuobj *pramin_bar;
-	struct nouveau_gpuobj *fb_bar;
+	struct nouveau_gpuobj *bar1_dmaobj;
+	struct nouveau_gpuobj *bar3_dmaobj;
 };
 
 static void
@@ -48,6 +54,7 @@
 		return;
 
 	nouveau_gpuobj_ref(NULL, &chan->ramfc);
+	nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
 	nouveau_gpuobj_ref(NULL, &chan->vm_pd);
 	if (chan->ramin_heap.free_stack.next)
 		drm_mm_takedown(&chan->ramin_heap);
@@ -56,14 +63,14 @@
 }
 
 static int
-nv50_channel_new(struct drm_device *dev, u32 size,
+nv50_channel_new(struct drm_device *dev, u32 size, struct nouveau_vm *vm,
 		 struct nouveau_channel **pchan)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	u32 pgd = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200;
 	u32  fc = (dev_priv->chipset == 0x50) ? 0x0000 : 0x4200;
 	struct nouveau_channel *chan;
-	int ret;
+	int ret, i;
 
 	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
 	if (!chan)
@@ -92,6 +99,17 @@
 		return ret;
 	}
 
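+	/* seed the page directory with invalid entries; the 0xdeadcafe
+	 * marker makes unmapped PDEs easy to spot in dumps */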
+	for (i = 0; i < 0x4000; i += 8) {
+		nv_wo32(chan->vm_pd, i + 0, 0x00000000);
+		nv_wo32(chan->vm_pd, i + 4, 0xdeadcafe);
+	}
+
+	ret = nouveau_vm_ref(vm, &chan->vm, chan->vm_pd);
+	if (ret) {
+		nv50_channel_del(&chan);
+		return ret;
+	}
+
 	ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst == ~0 ? ~0 :
 				      chan->ramin->pinst + fc,
 				      chan->ramin->vinst + fc, 0x100,
@@ -111,6 +129,7 @@
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nv50_instmem_priv *priv;
 	struct nouveau_channel *chan;
+	struct nouveau_vm *vm;
 	int ret, i;
 	u32 tmp;
 
@@ -127,112 +146,89 @@
 	ret = drm_mm_init(&dev_priv->ramin_heap, 0, dev_priv->ramin_size);
 	if (ret) {
 		NV_ERROR(dev, "Failed to init RAMIN heap\n");
-		return -ENOMEM;
+		goto error;
 	}
 
-	/* we need a channel to plug into the hw to control the BARs */
-	ret = nv50_channel_new(dev, 128*1024, &dev_priv->fifos[0]);
+	/* BAR3 */
+	ret = nouveau_vm_new(dev, BAR3_VM_BASE, BAR3_VM_SIZE, BAR3_VM_BASE,
+			     29, 12, 16, &dev_priv->bar3_vm);
 	if (ret)
-		return ret;
-	chan = dev_priv->fifos[127] = dev_priv->fifos[0];
+		goto error;
 
-	/* allocate page table for PRAMIN BAR */
-	ret = nouveau_gpuobj_new(dev, chan, (dev_priv->ramin_size >> 12) * 8,
-				 0x1000, NVOBJ_FLAG_ZERO_ALLOC,
-				 &priv->pramin_pt);
+	ret = nouveau_gpuobj_new(dev, NULL, (BAR3_VM_SIZE >> 12) * 8,
+				 0x1000, NVOBJ_FLAG_DONT_MAP |
+				 NVOBJ_FLAG_ZERO_ALLOC,
+				 &dev_priv->bar3_vm->pgt[0].obj);
 	if (ret)
-		return ret;
+		goto error;
+	dev_priv->bar3_vm->pgt[0].page_shift = 12;
+	dev_priv->bar3_vm->pgt[0].refcount = 1;
 
-	nv_wo32(chan->vm_pd, 0x0000, priv->pramin_pt->vinst | 0x63);
-	nv_wo32(chan->vm_pd, 0x0004, 0);
+	nv50_instmem_map(dev_priv->bar3_vm->pgt[0].obj);
 
-	/* DMA object for PRAMIN BAR */
-	ret = nouveau_gpuobj_new(dev, chan, 6*4, 16, 0, &priv->pramin_bar);
+	ret = nv50_channel_new(dev, 128 * 1024, dev_priv->bar3_vm, &chan);
 	if (ret)
-		return ret;
-	nv_wo32(priv->pramin_bar, 0x00, 0x7fc00000);
-	nv_wo32(priv->pramin_bar, 0x04, dev_priv->ramin_size - 1);
-	nv_wo32(priv->pramin_bar, 0x08, 0x00000000);
-	nv_wo32(priv->pramin_bar, 0x0c, 0x00000000);
-	nv_wo32(priv->pramin_bar, 0x10, 0x00000000);
-	nv_wo32(priv->pramin_bar, 0x14, 0x00000000);
+		goto error;
+	dev_priv->channels.ptr[0] = dev_priv->channels.ptr[127] = chan;
 
-	/* map channel into PRAMIN, gpuobj didn't do it for us */
-	ret = nv50_instmem_bind(dev, chan->ramin);
+	ret = nv50_gpuobj_dma_new(chan, 0x0000, BAR3_VM_BASE, BAR3_VM_SIZE,
+				  NV_MEM_TARGET_VM, NV_MEM_ACCESS_VM,
+				  NV_MEM_TYPE_VM, NV_MEM_COMP_VM,
+				  &priv->bar3_dmaobj);
 	if (ret)
-		return ret;
+		goto error;
 
-	/* poke regs... */
 	nv_wr32(dev, 0x001704, 0x00000000 | (chan->ramin->vinst >> 12));
 	nv_wr32(dev, 0x001704, 0x40000000 | (chan->ramin->vinst >> 12));
-	nv_wr32(dev, 0x00170c, 0x80000000 | (priv->pramin_bar->cinst >> 4));
-
-	tmp = nv_ri32(dev, 0);
-	nv_wi32(dev, 0, ~tmp);
-	if (nv_ri32(dev, 0) != ~tmp) {
-		NV_ERROR(dev, "PRAMIN readback failed\n");
-		return -EIO;
-	}
-	nv_wi32(dev, 0, tmp);
-
-	dev_priv->ramin_available = true;
-
-	/* Determine VM layout */
-	dev_priv->vm_gart_base = roundup(NV50_VM_BLOCK, NV50_VM_BLOCK);
-	dev_priv->vm_gart_size = NV50_VM_BLOCK;
-
-	dev_priv->vm_vram_base = dev_priv->vm_gart_base + dev_priv->vm_gart_size;
-	dev_priv->vm_vram_size = dev_priv->vram_size;
-	if (dev_priv->vm_vram_size > NV50_VM_MAX_VRAM)
-		dev_priv->vm_vram_size = NV50_VM_MAX_VRAM;
-	dev_priv->vm_vram_size = roundup(dev_priv->vm_vram_size, NV50_VM_BLOCK);
-	dev_priv->vm_vram_pt_nr = dev_priv->vm_vram_size / NV50_VM_BLOCK;
-
-	dev_priv->vm_end = dev_priv->vm_vram_base + dev_priv->vm_vram_size;
-
-	NV_DEBUG(dev, "NV50VM: GART 0x%016llx-0x%016llx\n",
-		 dev_priv->vm_gart_base,
-		 dev_priv->vm_gart_base + dev_priv->vm_gart_size - 1);
-	NV_DEBUG(dev, "NV50VM: VRAM 0x%016llx-0x%016llx\n",
-		 dev_priv->vm_vram_base,
-		 dev_priv->vm_vram_base + dev_priv->vm_vram_size - 1);
-
-	/* VRAM page table(s), mapped into VM at +1GiB  */
-	for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) {
-		ret = nouveau_gpuobj_new(dev, NULL, NV50_VM_BLOCK / 0x10000 * 8,
-					 0, NVOBJ_FLAG_ZERO_ALLOC,
-					 &chan->vm_vram_pt[i]);
-		if (ret) {
-			NV_ERROR(dev, "Error creating VRAM PGT: %d\n", ret);
-			dev_priv->vm_vram_pt_nr = i;
-			return ret;
-		}
-		dev_priv->vm_vram_pt[i] = chan->vm_vram_pt[i];
-
-		nv_wo32(chan->vm_pd, 0x10 + (i*8),
-			chan->vm_vram_pt[i]->vinst | 0x61);
-		nv_wo32(chan->vm_pd, 0x14 + (i*8), 0);
-	}
-
-	/* DMA object for FB BAR */
-	ret = nouveau_gpuobj_new(dev, chan, 6*4, 16, 0, &priv->fb_bar);
-	if (ret)
-		return ret;
-	nv_wo32(priv->fb_bar, 0x00, 0x7fc00000);
-	nv_wo32(priv->fb_bar, 0x04, 0x40000000 +
-				    pci_resource_len(dev->pdev, 1) - 1);
-	nv_wo32(priv->fb_bar, 0x08, 0x40000000);
-	nv_wo32(priv->fb_bar, 0x0c, 0x00000000);
-	nv_wo32(priv->fb_bar, 0x10, 0x00000000);
-	nv_wo32(priv->fb_bar, 0x14, 0x00000000);
+	nv_wr32(dev, 0x00170c, 0x80000000 | (priv->bar3_dmaobj->cinst >> 4));
 
 	dev_priv->engine.instmem.flush(dev);
+	dev_priv->ramin_available = true;
 
-	nv_wr32(dev, 0x001708, 0x80000000 | (priv->fb_bar->cinst >> 4));
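+	/* sanity check that PRAMIN is now accessible through BAR3 */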
+	tmp = nv_ro32(chan->ramin, 0);
+	nv_wo32(chan->ramin, 0, ~tmp);
+	if (nv_ro32(chan->ramin, 0) != ~tmp) {
+		NV_ERROR(dev, "PRAMIN readback failed\n");
+		ret = -EIO;
+		goto error;
+	}
+	nv_wo32(chan->ramin, 0, tmp);
+
+	/* BAR1 */
+	ret = nouveau_vm_new(dev, BAR1_VM_BASE, BAR1_VM_SIZE, BAR1_VM_BASE,
+			     29, 12, 16, &vm);
+	if (ret)
+		goto error;
+
+	ret = nouveau_vm_ref(vm, &dev_priv->bar1_vm, chan->vm_pd);
+	if (ret)
+		goto error;
+	nouveau_vm_ref(NULL, &vm, NULL);
+
+	ret = nv50_gpuobj_dma_new(chan, 0x0000, BAR1_VM_BASE, BAR1_VM_SIZE,
+				  NV_MEM_TARGET_VM, NV_MEM_ACCESS_VM,
+				  NV_MEM_TYPE_VM, NV_MEM_COMP_VM,
+				  &priv->bar1_dmaobj);
+	if (ret)
+		goto error;
+
+	nv_wr32(dev, 0x001708, 0x80000000 | (priv->bar1_dmaobj->cinst >> 4));
 	for (i = 0; i < 8; i++)
 		nv_wr32(dev, 0x1900 + (i*4), 0);
 
+	/* Create shared channel VM, space is reserved at the beginning
+	 * to catch "NULL pointer" references
+	 */
+	ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0020000000ULL,
+			     29, 12, 16, &dev_priv->chan_vm);
+	if (ret)
+		goto error;
+
 	return 0;
+
+error:
+	nv50_instmem_takedown(dev);
+	return ret;
 }
 
 void
@@ -240,7 +236,7 @@
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
-	struct nouveau_channel *chan = dev_priv->fifos[0];
+	struct nouveau_channel *chan = dev_priv->channels.ptr[0];
 	int i;
 
 	NV_DEBUG(dev, "\n");
@@ -250,23 +246,23 @@
 
 	dev_priv->ramin_available = false;
 
-	/* Restore state from before init */
+	nouveau_vm_ref(NULL, &dev_priv->chan_vm, NULL);
+
 	for (i = 0x1700; i <= 0x1710; i += 4)
 		nv_wr32(dev, i, priv->save1700[(i - 0x1700) / 4]);
 
-	nouveau_gpuobj_ref(NULL, &priv->fb_bar);
-	nouveau_gpuobj_ref(NULL, &priv->pramin_bar);
-	nouveau_gpuobj_ref(NULL, &priv->pramin_pt);
+	nouveau_gpuobj_ref(NULL, &priv->bar3_dmaobj);
+	nouveau_gpuobj_ref(NULL, &priv->bar1_dmaobj);
 
-	/* Destroy dummy channel */
-	if (chan) {
-		for (i = 0; i < dev_priv->vm_vram_pt_nr; i++)
-			nouveau_gpuobj_ref(NULL, &chan->vm_vram_pt[i]);
-		dev_priv->vm_vram_pt_nr = 0;
+	nouveau_vm_ref(NULL, &dev_priv->bar1_vm, chan->vm_pd);
+	dev_priv->channels.ptr[127] = NULL;
+	nv50_channel_del(&dev_priv->channels.ptr[0]);
 
-		nv50_channel_del(&dev_priv->fifos[0]);
-		dev_priv->fifos[127] = NULL;
-	}
+	nouveau_gpuobj_ref(NULL, &dev_priv->bar3_vm->pgt[0].obj);
+	nouveau_vm_ref(NULL, &dev_priv->bar3_vm, NULL);
+
+	if (dev_priv->ramin_heap.free_stack.next)
+		drm_mm_takedown(&dev_priv->ramin_heap);
 
 	dev_priv->engine.instmem.priv = NULL;
 	kfree(priv);
@@ -276,16 +272,8 @@
 nv50_instmem_suspend(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_channel *chan = dev_priv->fifos[0];
-	struct nouveau_gpuobj *ramin = chan->ramin;
-	int i;
 
-	ramin->im_backing_suspend = vmalloc(ramin->size);
-	if (!ramin->im_backing_suspend)
-		return -ENOMEM;
-
-	for (i = 0; i < ramin->size; i += 4)
-		ramin->im_backing_suspend[i/4] = nv_ri32(dev, i);
+	dev_priv->ramin_available = false;
 	return 0;
 }
 
@@ -294,146 +282,121 @@
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
-	struct nouveau_channel *chan = dev_priv->fifos[0];
-	struct nouveau_gpuobj *ramin = chan->ramin;
+	struct nouveau_channel *chan = dev_priv->channels.ptr[0];
 	int i;
 
-	dev_priv->ramin_available = false;
-	dev_priv->ramin_base = ~0;
-	for (i = 0; i < ramin->size; i += 4)
-		nv_wo32(ramin, i, ramin->im_backing_suspend[i/4]);
-	dev_priv->ramin_available = true;
-	vfree(ramin->im_backing_suspend);
-	ramin->im_backing_suspend = NULL;
-
 	/* Poke the relevant regs, and pray it works :) */
 	nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->vinst >> 12));
 	nv_wr32(dev, NV50_PUNK_UNK1710, 0);
 	nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->vinst >> 12) |
 					 NV50_PUNK_BAR_CFG_BASE_VALID);
-	nv_wr32(dev, NV50_PUNK_BAR1_CTXDMA, (priv->fb_bar->cinst >> 4) |
+	nv_wr32(dev, NV50_PUNK_BAR1_CTXDMA, (priv->bar1_dmaobj->cinst >> 4) |
 					NV50_PUNK_BAR1_CTXDMA_VALID);
-	nv_wr32(dev, NV50_PUNK_BAR3_CTXDMA, (priv->pramin_bar->cinst >> 4) |
+	nv_wr32(dev, NV50_PUNK_BAR3_CTXDMA, (priv->bar3_dmaobj->cinst >> 4) |
 					NV50_PUNK_BAR3_CTXDMA_VALID);
 
 	for (i = 0; i < 8; i++)
 		nv_wr32(dev, 0x1900 + (i*4), 0);
+
+	dev_priv->ramin_available = true;
 }
 
+struct nv50_gpuobj_node {
+	struct nouveau_vram *vram;
+	struct nouveau_vma chan_vma;
+	u32 align;
+};
+
 int
-nv50_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj,
-		      uint32_t *sz)
+nv50_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align)
 {
+	struct drm_device *dev = gpuobj->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
+	struct nv50_gpuobj_node *node = NULL;
 	int ret;
 
-	if (gpuobj->im_backing)
-		return -EINVAL;
+	node = kzalloc(sizeof(*node), GFP_KERNEL);
+	if (!node)
+		return -ENOMEM;
+	node->align = align;
 
-	*sz = ALIGN(*sz, 4096);
-	if (*sz == 0)
-		return -EINVAL;
+	size  = (size + 4095) & ~4095;
+	align = max(align, (u32)4096);
 
-	ret = nouveau_bo_new(dev, NULL, *sz, 0, TTM_PL_FLAG_VRAM, 0, 0x0000,
-			     true, false, &gpuobj->im_backing);
+	ret = vram->get(dev, size, align, 0, 0, &node->vram);
 	if (ret) {
-		NV_ERROR(dev, "error getting PRAMIN backing pages: %d\n", ret);
+		kfree(node);
 		return ret;
 	}
 
-	ret = nouveau_bo_pin(gpuobj->im_backing, TTM_PL_FLAG_VRAM);
-	if (ret) {
-		NV_ERROR(dev, "error pinning PRAMIN backing VRAM: %d\n", ret);
-		nouveau_bo_ref(NULL, &gpuobj->im_backing);
-		return ret;
+	gpuobj->vinst = node->vram->offset;
+
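+	/* object wants to be visible in the shared channel VM too,
+	 * in which case vinst becomes its virtual address there */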
+	if (gpuobj->flags & NVOBJ_FLAG_VM) {
+		ret = nouveau_vm_get(dev_priv->chan_vm, size, 12,
+				     NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS,
+				     &node->chan_vma);
+		if (ret) {
+			vram->put(dev, &node->vram);
+			kfree(node);
+			return ret;
+		}
+
+		nouveau_vm_map(&node->chan_vma, node->vram);
+		gpuobj->vinst = node->chan_vma.offset;
 	}
 
-	gpuobj->vinst = gpuobj->im_backing->bo.mem.start << PAGE_SHIFT;
+	gpuobj->size = size;
+	gpuobj->node = node;
 	return 0;
 }
 
 void
-nv50_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+nv50_instmem_put(struct nouveau_gpuobj *gpuobj)
 {
+	struct drm_device *dev = gpuobj->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
+	struct nv50_gpuobj_node *node;
 
-	if (gpuobj && gpuobj->im_backing) {
-		if (gpuobj->im_bound)
-			dev_priv->engine.instmem.unbind(dev, gpuobj);
-		nouveau_bo_unpin(gpuobj->im_backing);
-		nouveau_bo_ref(NULL, &gpuobj->im_backing);
-		gpuobj->im_backing = NULL;
+	node = gpuobj->node;
+	gpuobj->node = NULL;
+
+	if (node->chan_vma.node) {
+		nouveau_vm_unmap(&node->chan_vma);
+		nouveau_vm_put(&node->chan_vma);
 	}
+	vram->put(dev, &node->vram);
+	kfree(node);
 }
 
 int
-nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+nv50_instmem_map(struct nouveau_gpuobj *gpuobj)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
-	struct nouveau_gpuobj *pramin_pt = priv->pramin_pt;
-	uint32_t pte, pte_end;
-	uint64_t vram;
+	struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
+	struct nv50_gpuobj_node *node = gpuobj->node;
+	int ret;
 
-	if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound)
-		return -EINVAL;
+	ret = nouveau_vm_get(dev_priv->bar3_vm, gpuobj->size, 12,
+			     NV_MEM_ACCESS_RW, &node->vram->bar_vma);
+	if (ret)
+		return ret;
 
-	NV_DEBUG(dev, "st=0x%lx sz=0x%lx\n",
-		 gpuobj->im_pramin->start, gpuobj->im_pramin->size);
-
-	pte     = (gpuobj->im_pramin->start >> 12) << 1;
-	pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte;
-	vram    = gpuobj->vinst;
-
-	NV_DEBUG(dev, "pramin=0x%lx, pte=%d, pte_end=%d\n",
-		 gpuobj->im_pramin->start, pte, pte_end);
-	NV_DEBUG(dev, "first vram page: 0x%010llx\n", gpuobj->vinst);
-
-	vram |= 1;
-	if (dev_priv->vram_sys_base) {
-		vram += dev_priv->vram_sys_base;
-		vram |= 0x30;
-	}
-
-	while (pte < pte_end) {
-		nv_wo32(pramin_pt, (pte * 4) + 0, lower_32_bits(vram));
-		nv_wo32(pramin_pt, (pte * 4) + 4, upper_32_bits(vram));
-		vram += 0x1000;
-		pte += 2;
-	}
-	dev_priv->engine.instmem.flush(dev);
-
-	nv50_vm_flush(dev, 6);
-
-	gpuobj->im_bound = 1;
+	nouveau_vm_map(&node->vram->bar_vma, node->vram);
+	gpuobj->pinst = node->vram->bar_vma.offset;
 	return 0;
 }
 
-int
-nv50_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+void
+nv50_instmem_unmap(struct nouveau_gpuobj *gpuobj)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
-	uint32_t pte, pte_end;
+	struct nv50_gpuobj_node *node = gpuobj->node;
 
-	if (gpuobj->im_bound == 0)
-		return -EINVAL;
-
-	/* can happen during late takedown */
-	if (unlikely(!dev_priv->ramin_available))
-		return 0;
-
-	pte     = (gpuobj->im_pramin->start >> 12) << 1;
-	pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte;
-
-	while (pte < pte_end) {
-		nv_wo32(priv->pramin_pt, (pte * 4) + 0, 0x00000000);
-		nv_wo32(priv->pramin_pt, (pte * 4) + 4, 0x00000000);
-		pte += 2;
+	if (node->vram->bar_vma.node) {
+		nouveau_vm_unmap(&node->vram->bar_vma);
+		nouveau_vm_put(&node->vram->bar_vma);
 	}
-	dev_priv->engine.instmem.flush(dev);
-
-	gpuobj->im_bound = 0;
-	return 0;
 }
 
 void
@@ -452,11 +415,3 @@
 		NV_ERROR(dev, "PRAMIN flush timeout\n");
 }
 
-void
-nv50_vm_flush(struct drm_device *dev, int engine)
-{
-	nv_wr32(dev, 0x100c80, (engine << 16) | 1);
-	if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000))
-		NV_ERROR(dev, "vm flush timeout: engine %d\n", engine);
-}
-
diff --git a/drivers/gpu/drm/nouveau/nv50_vm.c b/drivers/gpu/drm/nouveau/nv50_vm.c
new file mode 100644
index 0000000..eebab95
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_vm.c
@@ -0,0 +1,178 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_vm.h"
+
+void
+nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 type, u32 pde,
+		struct nouveau_gpuobj *pgt)
+{
+	struct drm_nouveau_private *dev_priv = pgd->dev->dev_private;
+	u32 coverage = (pgt->size >> 3) << type;
+	u64 phys;
+
+	phys  = pgt->vinst;
+	phys |= 0x01; /* present */
+	phys |= (type == 12) ? 0x02 : 0x00; /* 4KiB pages */
+	if (dev_priv->vram_sys_base) {
+		phys += dev_priv->vram_sys_base;
+		phys |= 0x30;
+	}
+
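+	/* encode how much address space the page table covers */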
+	if (coverage <= 32 * 1024 * 1024)
+		phys |= 0x60;
+	else if (coverage <= 64 * 1024 * 1024)
+		phys |= 0x40;
+	else if (coverage < 128 * 1024 * 1024)
+		phys |= 0x20;
+
+	nv_wo32(pgd, (pde * 8) + 0, lower_32_bits(phys));
+	nv_wo32(pgd, (pde * 8) + 4, upper_32_bits(phys));
+}
+
+void
+nv50_vm_unmap_pgt(struct nouveau_gpuobj *pgd, u32 pde)
+{
+	nv_wo32(pgd, (pde * 8) + 0, 0x00000000);
+	nv_wo32(pgd, (pde * 8) + 4, 0xdeadcafe);
+}
+
+static inline u64
+nv50_vm_addr(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
+	     u64 phys, u32 memtype, u32 target)
+{
+	struct drm_nouveau_private *dev_priv = pgt->dev->dev_private;
+
+	phys |= 1; /* present */
+	phys |= (u64)memtype << 40;
+
+	/* IGPs don't have real VRAM, re-target to stolen system memory */
+	if (target == 0 && dev_priv->vram_sys_base) {
+		phys  += dev_priv->vram_sys_base;
+		target = 3;
+	}
+
+	phys |= target << 4;
+
+	if (vma->access & NV_MEM_ACCESS_SYS)
+		phys |= (1 << 6);
+
+	if (!(vma->access & NV_MEM_ACCESS_WO))
+		phys |= (1 << 3);
+
+	return phys;
+}
+
+void
+nv50_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
+	    struct nouveau_vram *mem, u32 pte, u32 cnt, u64 phys)
+{
+	u32 block;
+	int i;
+
+	phys  = nv50_vm_addr(vma, pgt, phys, mem->memtype, 0);
+	pte <<= 3;
+	cnt <<= 3;
+
+	while (cnt) {
+		u32 offset_h = upper_32_bits(phys);
+		u32 offset_l = lower_32_bits(phys);
+
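+		/* find the largest naturally-aligned power-of-two run
+		 * that fits the remaining count; its size class ends up
+		 * in bits 7:9 of each PTE */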
+		for (i = 7; i >= 0; i--) {
+			block = 1 << (i + 3);
+			if (cnt >= block && !(pte & (block - 1)))
+				break;
+		}
+		offset_l |= (i << 7);
+
+		phys += block << (vma->node->type - 3);
+		cnt  -= block;
+
+		while (block) {
+			nv_wo32(pgt, pte + 0, offset_l);
+			nv_wo32(pgt, pte + 4, offset_h);
+			pte += 8;
+			block -= 8;
+		}
+	}
+}
+
+void
+nv50_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
+	       u32 pte, dma_addr_t *list, u32 cnt)
+{
+	pte <<= 3;
+	while (cnt--) {
+		u64 phys = nv50_vm_addr(vma, pgt, (u64)*list++, 0, 2);
+		nv_wo32(pgt, pte + 0, lower_32_bits(phys));
+		nv_wo32(pgt, pte + 4, upper_32_bits(phys));
+		pte += 8;
+	}
+}
+
+void
+nv50_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
+{
+	pte <<= 3;
+	while (cnt--) {
+		nv_wo32(pgt, pte + 0, 0x00000000);
+		nv_wo32(pgt, pte + 4, 0x00000000);
+		pte += 8;
+	}
+}
+
+void
+nv50_vm_flush(struct nouveau_vm *vm)
+{
+	struct drm_nouveau_private *dev_priv = vm->dev->dev_private;
+	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
+	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+	struct nouveau_crypt_engine *pcrypt = &dev_priv->engine.crypt;
+
+	pinstmem->flush(vm->dev);
+
+	/* BAR */
+	if (vm != dev_priv->chan_vm) {
+		nv50_vm_flush_engine(vm->dev, 6);
+		return;
+	}
+
+	pfifo->tlb_flush(vm->dev);
+
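+	/* only engines holding a reference on this VM need a TLB flush */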
+	if (atomic_read(&vm->pgraph_refs))
+		pgraph->tlb_flush(vm->dev);
+	if (atomic_read(&vm->pcrypt_refs))
+		pcrypt->tlb_flush(vm->dev);
+}
+
+void
+nv50_vm_flush_engine(struct drm_device *dev, int engine)
+{
+	nv_wr32(dev, 0x100c80, (engine << 16) | 1);
+	if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000))
+		NV_ERROR(dev, "vm flush timeout: engine %d\n", engine);
+}
diff --git a/drivers/gpu/drm/nouveau/nv50_vram.c b/drivers/gpu/drm/nouveau/nv50_vram.c
new file mode 100644
index 0000000..47489ed
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_vram.c
@@ -0,0 +1,190 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_mm.h"
+
+static int types[0x80] = {
+	1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	1, 1, 1, 1, 0, 0, 0, 0, 2, 2, 2, 2, 0, 0, 0, 0,
+	1, 1, 1, 1, 1, 1, 1, 0, 2, 2, 2, 2, 2, 2, 2, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 0, 0,
+	0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 2, 2, 2, 2,
+	1, 0, 2, 0, 1, 0, 2, 0, 1, 1, 2, 2, 1, 1, 0, 0
+};
+
+bool
+nv50_vram_flags_valid(struct drm_device *dev, u32 tile_flags)
+{
+	int type = (tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK) >> 8;
+
+	if (likely(type < ARRAY_SIZE(types) && types[type]))
+		return true;
+	return false;
+}
+
+void
+nv50_vram_del(struct drm_device *dev, struct nouveau_vram **pvram)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
+	struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_VRAM];
+	struct nouveau_mm *mm = man->priv;
+	struct nouveau_mm_node *this;
+	struct nouveau_vram *vram;
+
+	vram = *pvram;
+	*pvram = NULL;
+	if (unlikely(vram == NULL))
+		return;
+
+	mutex_lock(&mm->mutex);
+	while (!list_empty(&vram->regions)) {
+		this = list_first_entry(&vram->regions, struct nouveau_mm_node, rl_entry);
+
+		list_del(&this->rl_entry);
+		nouveau_mm_put(mm, this);
+	}
+	mutex_unlock(&mm->mutex);
+
+	kfree(vram);
+}
+
+int
+nv50_vram_new(struct drm_device *dev, u64 size, u32 align, u32 size_nc,
+	      u32 type, struct nouveau_vram **pvram)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
+	struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_VRAM];
+	struct nouveau_mm *mm = man->priv;
+	struct nouveau_mm_node *r;
+	struct nouveau_vram *vram;
+	int ret;
+
+	if (!types[type])
+		return -EINVAL;
+	size >>= 12;
+	align >>= 12;
+	size_nc >>= 12;
+
+	vram = kzalloc(sizeof(*vram), GFP_KERNEL);
+	if (!vram)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&vram->regions);
+	vram->dev = dev_priv->dev;
+	vram->memtype = type;
+	vram->size = size;
+
+	mutex_lock(&mm->mutex);
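+	/* grab regions until the request is satisfied; size_nc allows
+	 * the allocation to be split into smaller pieces */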
+	do {
+		ret = nouveau_mm_get(mm, types[type], size, size_nc, align, &r);
+		if (ret) {
+			mutex_unlock(&mm->mutex);
+			nv50_vram_del(dev, &vram);
+			return ret;
+		}
+
+		list_add_tail(&r->rl_entry, &vram->regions);
+		size -= r->length;
+	} while (size);
+	mutex_unlock(&mm->mutex);
+
+	r = list_first_entry(&vram->regions, struct nouveau_mm_node, rl_entry);
+	vram->offset = (u64)r->offset << 12;
+	*pvram = vram;
+	return 0;
+}
+
+static u32
+nv50_vram_rblock(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	int i, parts, colbits, rowbitsa, rowbitsb, banks;
+	u64 rowsize, predicted;
+	u32 r0, r4, rt, ru, rblock_size;
+
+	r0 = nv_rd32(dev, 0x100200);
+	r4 = nv_rd32(dev, 0x100204);
+	rt = nv_rd32(dev, 0x100250);
+	ru = nv_rd32(dev, 0x001540);
+	NV_DEBUG(dev, "memcfg 0x%08x 0x%08x 0x%08x 0x%08x\n", r0, r4, rt, ru);
+
+	for (i = 0, parts = 0; i < 8; i++) {
+		if (ru & (0x00010000 << i))
+			parts++;
+	}
+
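+	/* decode DRAM geometry and cross-check the reported VRAM size */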
+	colbits  =  (r4 & 0x0000f000) >> 12;
+	rowbitsa = ((r4 & 0x000f0000) >> 16) + 8;
+	rowbitsb = ((r4 & 0x00f00000) >> 20) + 8;
+	banks    = ((r4 & 0x01000000) ? 8 : 4);
+
+	rowsize = parts * banks * (1 << colbits) * 8;
+	predicted = rowsize << rowbitsa;
+	if (r0 & 0x00000004)
+		predicted += rowsize << rowbitsb;
+
+	if (predicted != dev_priv->vram_size) {
+		NV_WARN(dev, "memory controller reports %dMiB VRAM\n",
+			(u32)(dev_priv->vram_size >> 20));
+		NV_WARN(dev, "we calculated %dMiB VRAM\n",
+			(u32)(predicted >> 20));
+	}
+
+	rblock_size = rowsize;
+	if (rt & 1)
+		rblock_size *= 3;
+
+	NV_DEBUG(dev, "rblock %d bytes\n", rblock_size);
+	return rblock_size;
+}
+
+int
+nv50_vram_init(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	dev_priv->vram_size  = nv_rd32(dev, 0x10020c);
+	dev_priv->vram_size |= (dev_priv->vram_size & 0xff) << 32;
+	dev_priv->vram_size &= 0xffffffff00ULL;
+
+	switch (dev_priv->chipset) {
+	case 0xaa:
+	case 0xac:
+	case 0xaf:
+		dev_priv->vram_sys_base = (u64)nv_rd32(dev, 0x100e10) << 12;
+		dev_priv->vram_rblock_size = 4096;
+		break;
+	default:
+		dev_priv->vram_rblock_size = nv50_vram_rblock(dev);
+		break;
+	}
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nv84_crypt.c b/drivers/gpu/drm/nouveau/nv84_crypt.c
new file mode 100644
index 0000000..ec18ae1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv84_crypt.c
@@ -0,0 +1,140 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_util.h"
+#include "nouveau_vm.h"
+
+static void nv84_crypt_isr(struct drm_device *);
+
+int
+nv84_crypt_create_context(struct nouveau_channel *chan)
+{
+	struct drm_device *dev = chan->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_gpuobj *ramin = chan->ramin;
+	int ret;
+
+	NV_DEBUG(dev, "ch%d\n", chan->id);
+
+	ret = nouveau_gpuobj_new(dev, chan, 256, 0,
+				 NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE,
+				 &chan->crypt_ctx);
+	if (ret)
+		return ret;
+
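+	/* PCRYPT context DMA in RAMIN: flags, limit (base + 0xff), base */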
+	nv_wo32(ramin, 0xa0, 0x00190000);
+	nv_wo32(ramin, 0xa4, chan->crypt_ctx->vinst + 0xff);
+	nv_wo32(ramin, 0xa8, chan->crypt_ctx->vinst);
+	nv_wo32(ramin, 0xac, 0);
+	nv_wo32(ramin, 0xb0, 0);
+	nv_wo32(ramin, 0xb4, 0);
+
+	dev_priv->engine.instmem.flush(dev);
+	atomic_inc(&chan->vm->pcrypt_refs);
+	return 0;
+}
+
+void
+nv84_crypt_destroy_context(struct nouveau_channel *chan)
+{
+	struct drm_device *dev = chan->dev;
+	u32 inst;
+
+	if (!chan->crypt_ctx)
+		return;
+
+	inst  = (chan->ramin->vinst >> 12);
+	inst |= 0x80000000;
+
+	/* mark context as invalid if still on the hardware; not
+	 * doing this causes issues the next time PCRYPT is used,
+	 * unsurprisingly :)
+	 */
+	nv_wr32(dev, 0x10200c, 0x00000000);
+	if (nv_rd32(dev, 0x102188) == inst)
+		nv_mask(dev, 0x102188, 0x80000000, 0x00000000);
+	if (nv_rd32(dev, 0x10218c) == inst)
+		nv_mask(dev, 0x10218c, 0x80000000, 0x00000000);
+	nv_wr32(dev, 0x10200c, 0x00000010);
+
+	nouveau_gpuobj_ref(NULL, &chan->crypt_ctx);
+	atomic_dec(&chan->vm->pcrypt_refs);
+}
+
+void
+nv84_crypt_tlb_flush(struct drm_device *dev)
+{
+	nv50_vm_flush_engine(dev, 0x0a);
+}
+
+int
+nv84_crypt_init(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_crypt_engine *pcrypt = &dev_priv->engine.crypt;
+
+	if (!pcrypt->registered) {
+		NVOBJ_CLASS(dev, 0x74c1, CRYPT);
+		pcrypt->registered = true;
+	}
+
+	nv_mask(dev, 0x000200, 0x00004000, 0x00000000);
+	nv_mask(dev, 0x000200, 0x00004000, 0x00004000);
+
+	nouveau_irq_register(dev, 14, nv84_crypt_isr);
+	nv_wr32(dev, 0x102130, 0xffffffff);
+	nv_wr32(dev, 0x102140, 0xffffffbf);
+
+	nv_wr32(dev, 0x10200c, 0x00000010);
+	return 0;
+}
+
+void
+nv84_crypt_fini(struct drm_device *dev)
+{
+	nv_wr32(dev, 0x102140, 0x00000000);
+	nouveau_irq_unregister(dev, 14);
+}
+
+static void
+nv84_crypt_isr(struct drm_device *dev)
+{
+	u32 stat = nv_rd32(dev, 0x102130);
+	u32 mthd = nv_rd32(dev, 0x102190);
+	u32 data = nv_rd32(dev, 0x102194);
+	u32 inst = nv_rd32(dev, 0x102188) & 0x7fffffff;
+	int show = nouveau_ratelimit();
+
+	if (show) {
+		NV_INFO(dev, "PCRYPT_INTR: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+			     stat, mthd, data, inst);
+	}
+
+	nv_wr32(dev, 0x102130, stat);
+	nv_wr32(dev, 0x10200c, 0x10);
+
+	nv50_fb_vm_trap(dev, show, "PCRYPT");
+}
diff --git a/drivers/gpu/drm/nouveau/nvc0_instmem.c b/drivers/gpu/drm/nouveau/nvc0_instmem.c
index 13a0f78..3923208 100644
--- a/drivers/gpu/drm/nouveau/nvc0_instmem.c
+++ b/drivers/gpu/drm/nouveau/nvc0_instmem.c
@@ -26,67 +26,89 @@
 
 #include "nouveau_drv.h"
 
+struct nvc0_gpuobj_node {
+	struct nouveau_bo *vram;
+	struct drm_mm_node *ramin;
+	u32 align;
+};
+
 int
-nvc0_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj,
-		      uint32_t *size)
+nvc0_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align)
 {
+	struct drm_device *dev = gpuobj->dev;
+	struct nvc0_gpuobj_node *node = NULL;
 	int ret;
 
-	*size = ALIGN(*size, 4096);
-	if (*size == 0)
-		return -EINVAL;
+	node = kzalloc(sizeof(*node), GFP_KERNEL);
+	if (!node)
+		return -ENOMEM;
+	node->align = align;
 
-	ret = nouveau_bo_new(dev, NULL, *size, 0, TTM_PL_FLAG_VRAM, 0, 0x0000,
-			     true, false, &gpuobj->im_backing);
+	ret = nouveau_bo_new(dev, NULL, size, align, TTM_PL_FLAG_VRAM,
+			     0, 0x0000, true, false, &node->vram);
 	if (ret) {
 		NV_ERROR(dev, "error getting PRAMIN backing pages: %d\n", ret);
 		return ret;
 	}
 
-	ret = nouveau_bo_pin(gpuobj->im_backing, TTM_PL_FLAG_VRAM);
+	ret = nouveau_bo_pin(node->vram, TTM_PL_FLAG_VRAM);
 	if (ret) {
 		NV_ERROR(dev, "error pinning PRAMIN backing VRAM: %d\n", ret);
-		nouveau_bo_ref(NULL, &gpuobj->im_backing);
+		nouveau_bo_ref(NULL, &node->vram);
 		return ret;
 	}
 
-	gpuobj->vinst = gpuobj->im_backing->bo.mem.start << PAGE_SHIFT;
+	gpuobj->vinst = node->vram->bo.mem.start << PAGE_SHIFT;
+	gpuobj->size  = node->vram->bo.mem.num_pages << PAGE_SHIFT;
+	gpuobj->node  = node;
 	return 0;
 }
 
 void
-nvc0_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+nvc0_instmem_put(struct nouveau_gpuobj *gpuobj)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nvc0_gpuobj_node *node;
 
-	if (gpuobj && gpuobj->im_backing) {
-		if (gpuobj->im_bound)
-			dev_priv->engine.instmem.unbind(dev, gpuobj);
-		nouveau_bo_unpin(gpuobj->im_backing);
-		nouveau_bo_ref(NULL, &gpuobj->im_backing);
-		gpuobj->im_backing = NULL;
-	}
+	node = gpuobj->node;
+	gpuobj->node = NULL;
+
+	nouveau_bo_unpin(node->vram);
+	nouveau_bo_ref(NULL, &node->vram);
+	kfree(node);
 }
 
 int
-nvc0_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+nvc0_instmem_map(struct nouveau_gpuobj *gpuobj)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	uint32_t pte, pte_end;
-	uint64_t vram;
+	struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
+	struct nvc0_gpuobj_node *node = gpuobj->node;
+	struct drm_device *dev = gpuobj->dev;
+	struct drm_mm_node *ramin = NULL;
+	u32 pte, pte_end;
+	u64 vram;
 
-	if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound)
-		return -EINVAL;
+	do {
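+		/* drm_mm_pre_get() preallocates nodes so the search/get
+		 * pair can run atomically under the spinlock */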
+		if (drm_mm_pre_get(&dev_priv->ramin_heap))
+			return -ENOMEM;
 
-	NV_DEBUG(dev, "st=0x%lx sz=0x%lx\n",
-		 gpuobj->im_pramin->start, gpuobj->im_pramin->size);
+		spin_lock(&dev_priv->ramin_lock);
+		ramin = drm_mm_search_free(&dev_priv->ramin_heap, gpuobj->size,
+					   node->align, 0);
+		if (ramin == NULL) {
+			spin_unlock(&dev_priv->ramin_lock);
+			return -ENOMEM;
+		}
 
-	pte     = gpuobj->im_pramin->start >> 12;
-	pte_end = (gpuobj->im_pramin->size >> 12) + pte;
+		ramin = drm_mm_get_block_atomic(ramin, gpuobj->size, node->align);
+		spin_unlock(&dev_priv->ramin_lock);
+	} while (ramin == NULL);
+
+	pte     = (ramin->start >> 12) << 1;
+	pte_end = ((ramin->size >> 12) << 1) + pte;
 	vram    = gpuobj->vinst;
 
 	NV_DEBUG(dev, "pramin=0x%lx, pte=%d, pte_end=%d\n",
-		 gpuobj->im_pramin->start, pte, pte_end);
+		 ramin->start, pte, pte_end);
 	NV_DEBUG(dev, "first vram page: 0x%010llx\n", gpuobj->vinst);
 
 	while (pte < pte_end) {
@@ -103,30 +125,35 @@
 		nv_wr32(dev, 0x100cbc, 0x80000005);
 	}
 
-	gpuobj->im_bound = 1;
+	node->ramin   = ramin;
+	gpuobj->pinst = ramin->start;
 	return 0;
 }
 
-int
-nvc0_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+void
+nvc0_instmem_unmap(struct nouveau_gpuobj *gpuobj)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	uint32_t pte, pte_end;
+	struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
+	struct nvc0_gpuobj_node *node = gpuobj->node;
+	u32 pte, pte_end;
 
-	if (gpuobj->im_bound == 0)
-		return -EINVAL;
+	if (!node->ramin || !dev_priv->ramin_available)
+		return;
 
-	pte     = gpuobj->im_pramin->start >> 12;
-	pte_end = (gpuobj->im_pramin->size >> 12) + pte;
+	pte     = (node->ramin->start >> 12) << 1;
+	pte_end = ((node->ramin->size >> 12) << 1) + pte;
+
 	while (pte < pte_end) {
-		nv_wr32(dev, 0x702000 + (pte * 8), 0);
-		nv_wr32(dev, 0x702004 + (pte * 8), 0);
+		nv_wr32(gpuobj->dev, 0x702000 + (pte * 8), 0);
+		nv_wr32(gpuobj->dev, 0x702004 + (pte * 8), 0);
 		pte++;
 	}
-	dev_priv->engine.instmem.flush(dev);
+	dev_priv->engine.instmem.flush(gpuobj->dev);
 
-	gpuobj->im_bound = 0;
-	return 0;
+	spin_lock(&dev_priv->ramin_lock);
+	drm_mm_put_block(node->ramin);
+	node->ramin = NULL;
+	spin_unlock(&dev_priv->ramin_lock);
 }
 
 void
diff --git a/drivers/gpu/drm/nouveau/nvreg.h b/drivers/gpu/drm/nouveau/nvreg.h
index 881f8a5..fe0f253 100644
--- a/drivers/gpu/drm/nouveau/nvreg.h
+++ b/drivers/gpu/drm/nouveau/nvreg.h
@@ -153,7 +153,8 @@
 #define NV_PCRTC_START					0x00600800
 #define NV_PCRTC_CONFIG					0x00600804
 #	define NV_PCRTC_CONFIG_START_ADDRESS_NON_VGA		(1 << 0)
-#	define NV_PCRTC_CONFIG_START_ADDRESS_HSYNC		(2 << 0)
+#	define NV04_PCRTC_CONFIG_START_ADDRESS_HSYNC		(4 << 0)
+#	define NV10_PCRTC_CONFIG_START_ADDRESS_HSYNC		(2 << 0)
 #define NV_PCRTC_CURSOR_CONFIG				0x00600810
 #	define NV_PCRTC_CURSOR_CONFIG_ENABLE_ENABLE		(1 << 0)
 #	define NV_PCRTC_CURSOR_CONFIG_DOUBLE_SCAN_ENABLE	(1 << 4)
diff --git a/include/drm/nouveau_drm.h b/include/drm/nouveau_drm.h
index bc5590b..e2cfe80 100644
--- a/include/drm/nouveau_drm.h
+++ b/include/drm/nouveau_drm.h
@@ -71,16 +71,14 @@
 #define NOUVEAU_GETPARAM_PCI_VENDOR      3
 #define NOUVEAU_GETPARAM_PCI_DEVICE      4
 #define NOUVEAU_GETPARAM_BUS_TYPE        5
-#define NOUVEAU_GETPARAM_FB_PHYSICAL     6
-#define NOUVEAU_GETPARAM_AGP_PHYSICAL    7
 #define NOUVEAU_GETPARAM_FB_SIZE         8
 #define NOUVEAU_GETPARAM_AGP_SIZE        9
-#define NOUVEAU_GETPARAM_PCI_PHYSICAL    10
 #define NOUVEAU_GETPARAM_CHIPSET_ID      11
 #define NOUVEAU_GETPARAM_VM_VRAM_BASE    12
 #define NOUVEAU_GETPARAM_GRAPH_UNITS     13
 #define NOUVEAU_GETPARAM_PTIMER_TIME     14
 #define NOUVEAU_GETPARAM_HAS_BO_USAGE    15
+#define NOUVEAU_GETPARAM_HAS_PAGEFLIP    16
 struct drm_nouveau_getparam {
 	uint64_t param;
 	uint64_t value;
@@ -171,7 +169,6 @@
 };
 
 #define NOUVEAU_GEM_CPU_PREP_NOWAIT                                  0x00000001
-#define NOUVEAU_GEM_CPU_PREP_NOBLOCK                                 0x00000002
 #define NOUVEAU_GEM_CPU_PREP_WRITE                                   0x00000004
 struct drm_nouveau_gem_cpu_prep {
 	uint32_t handle;