Merge branch 'master' into nouveau-1

Conflicts:

	linux-core/Makefile.kernel
diff --git a/bsd-core/i915_drv.c b/bsd-core/i915_drv.c
index 269d7b3..d42b207 100644
--- a/bsd-core/i915_drv.c
+++ b/bsd-core/i915_drv.c
@@ -1,4 +1,4 @@
-/* i915_drv.c -- ATI Radeon driver -*- linux-c -*-
+/* i915_drv.c -- Intel i915 driver -*- linux-c -*-
  * Created: Wed Feb 14 17:10:04 2001 by gareth@valinux.com
  */
 /*-
diff --git a/configure.ac b/configure.ac
index 4881350..224f43a 100644
--- a/configure.ac
+++ b/configure.ac
@@ -19,7 +19,7 @@
 #  CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 
 AC_PREREQ(2.57)
-AC_INIT([libdrm], 2.0.2, [dri-devel@lists.sourceforge.net], libdrm)
+AC_INIT([libdrm], 2.2.0, [dri-devel@lists.sourceforge.net], libdrm)
 AC_CONFIG_SRCDIR([Makefile.am])
 AM_INIT_AUTOMAKE([dist-bzip2])
 
@@ -30,6 +30,7 @@
 AC_PROG_CC
 
 AC_HEADER_STDC
+AC_SYS_LARGEFILE
 
 pkgconfigdir=${libdir}/pkgconfig
 AC_SUBST(pkgconfigdir)
diff --git a/libdrm/Makefile.am b/libdrm/Makefile.am
index b12e87f..91a7e5d 100644
--- a/libdrm/Makefile.am
+++ b/libdrm/Makefile.am
@@ -26,6 +26,6 @@
 libdrm_la_SOURCES = xf86drm.c xf86drmHash.c xf86drmRandom.c xf86drmSL.c
 
 libdrmincludedir = ${includedir}
-libdrminclude_HEADERS = xf86drm.h
+libdrminclude_HEADERS = xf86drm.h xf86mm.h
 
 EXTRA_DIST = ChangeLog TODO
diff --git a/libdrm/xf86drm.c b/libdrm/xf86drm.c
index d4f80b4..5efb532 100644
--- a/libdrm/xf86drm.c
+++ b/libdrm/xf86drm.c
@@ -46,6 +46,9 @@
 #  include <sys/mman.h>
 # endif
 #else
+# ifdef HAVE_CONFIG_H
+#  include <config.h>
+# endif
 # include <stdio.h>
 # include <stdlib.h>
 # include <unistd.h>
@@ -66,6 +69,7 @@
 # include "drm.h"
 #endif
 
+
 /* Not all systems have MAP_FAILED defined */
 #ifndef MAP_FAILED
 #define MAP_FAILED ((void *)-1)
@@ -713,7 +717,7 @@
      *   revision 1.2.x = added drmSetInterfaceVersion
      *                    modified drmOpen to handle both busid and name
      */
-    version->version_major      = 1;
+    version->version_major      = 2;
     version->version_minor      = 2;
     version->version_patchlevel = 0;
 
@@ -2252,3 +2256,960 @@
     }
     return 0;
 }
+
+
+/*
+ * Valid flags are 
+ * DRM_FENCE_FLAG_EMIT
+ * DRM_FENCE_FLAG_SHAREABLE
+ * DRM_FENCE_MASK_DRIVER
+ */
+
+int drmFenceCreate(int fd, unsigned flags, int class, unsigned type,
+		   drmFence *fence)
+{
+    drm_fence_arg_t arg;
+    
+    memset(&arg, 0, sizeof(arg));
+    arg.type = type;
+    arg.class = class;
+    arg.flags = flags;
+    arg.op = drm_fence_create;
+    if (ioctl(fd, DRM_IOCTL_FENCE, &arg))
+	return -errno;
+    fence->handle = arg.handle;
+    fence->class = arg.class;
+    fence->type = arg.type;
+    fence->flags = arg.flags;
+    fence->signaled = 0;
+    return 0;
+}
+
+/*
+ * Valid flags are 
+ * DRM_FENCE_FLAG_SHAREABLE
+ * DRM_FENCE_MASK_DRIVER
+ */
+
+int drmFenceBuffers(int fd, unsigned flags, drmFence *fence)
+{
+    drm_fence_arg_t arg;
+    
+    memset(&arg, 0, sizeof(arg));
+    arg.flags = flags;
+    arg.op = drm_fence_buffers;
+    if (ioctl(fd, DRM_IOCTL_FENCE, &arg))
+	return -errno;
+    fence->handle = arg.handle;
+    fence->class = arg.class;
+    fence->type = arg.type;
+    fence->flags = arg.flags;
+    fence->signaled = 0;
+    return 0;
+}
+    
+int drmFenceDestroy(int fd, const drmFence *fence)
+{
+    drm_fence_arg_t arg;
+   
+    memset(&arg, 0, sizeof(arg));
+    arg.handle = fence->handle;
+    arg.op = drm_fence_destroy;
+    if (ioctl(fd, DRM_IOCTL_FENCE, &arg))
+	return -errno;
+    return 0;
+}
+
+int drmFenceReference(int fd, unsigned handle, drmFence *fence)
+{
+    drm_fence_arg_t arg;
+   
+    memset(&arg, 0, sizeof(arg));
+    arg.handle = handle;
+    arg.op = drm_fence_reference;
+    if (ioctl(fd, DRM_IOCTL_FENCE, &arg))
+	return -errno;
+    fence->handle = arg.handle;
+    fence->class = arg.class;
+    fence->type = arg.type;
+    fence->flags = arg.flags;
+    fence->signaled = arg.signaled;
+    return 0;
+}
+
+int drmFenceUnreference(int fd, const drmFence *fence)
+{
+    drm_fence_arg_t arg;
+   
+    memset(&arg, 0, sizeof(arg));
+    arg.handle = fence->handle;
+    arg.op = drm_fence_unreference;
+    if (ioctl(fd, DRM_IOCTL_FENCE, &arg))
+	return -errno;
+    return 0;
+}
+
+int drmFenceFlush(int fd, drmFence *fence, unsigned flush_type)
+{
+    drm_fence_arg_t arg;
+   
+    memset(&arg, 0, sizeof(arg));
+    arg.handle = fence->handle;
+    arg.type = flush_type;
+    arg.op = drm_fence_flush;
+    if (ioctl(fd, DRM_IOCTL_FENCE, &arg))
+	return -errno;
+    fence->class = arg.class;
+    fence->type = arg.type;
+    fence->signaled = arg.signaled;
+    return 0;
+}
+
+int drmFenceUpdate(int fd, drmFence *fence)
+{
+    drm_fence_arg_t arg;
+
+    memset(&arg, 0, sizeof(arg));
+    arg.handle = fence->handle;
+    arg.op = drm_fence_signaled;
+    if (ioctl(fd, DRM_IOCTL_FENCE, &arg))
+	return -errno;
+    fence->class = arg.class;
+    fence->type = arg.type;
+    fence->signaled = arg.signaled;
+    return 0;
+}
+
+int drmFenceSignaled(int fd, drmFence *fence, unsigned fenceType, 
+		     int *signaled)
+{
+    int ret;
+
+    if ((fence->flags & DRM_FENCE_FLAG_SHAREABLE) ||
+	((fenceType & fence->signaled) != fenceType)) {
+
+	ret = drmFenceFlush(fd, fence, fenceType);
+	if (ret)
+	    return ret;
+    }
+
+    *signaled = ((fenceType & fence->signaled) == fenceType);
+
+    return 0;
+}
+
+/*
+ * Valid flags are 
+ * DRM_FENCE_FLAG_SHAREABLE
+ * DRM_FENCE_MASK_DRIVER
+ */
+
+
+int drmFenceEmit(int fd, unsigned flags, drmFence *fence, unsigned emit_type)
+{
+    drm_fence_arg_t arg;
+   
+    memset(&arg, 0, sizeof(arg));
+    arg.flags = flags;
+    arg.handle = fence->handle;
+    arg.type = emit_type;
+    arg.op = drm_fence_emit;
+    if (ioctl(fd, DRM_IOCTL_FENCE, &arg))
+	return -errno;
+    fence->class = arg.class;
+    fence->type = arg.type;
+    fence->signaled = arg.signaled;
+    return 0;
+}
+
+/*
+ * Valid flags are 
+ * DRM_FENCE_FLAG_WAIT_LAZY
+ * DRM_FENCE_FLAG_WAIT_IGNORE_SIGNALS
+ */
+    
+int drmFenceWait(int fd, unsigned flags, drmFence *fence, unsigned flush_type)
+{
+    drm_fence_arg_t arg;
+    int ret;
+
+    if (flush_type == 0) {
+	flush_type = fence->type;
+    }
+
+    if (!(fence->flags & DRM_FENCE_FLAG_SHAREABLE)) {
+	if ((flush_type & fence->signaled) == flush_type) {
+	    return 0;
+	}
+    }
+
+    memset(&arg, 0, sizeof(arg));
+    arg.handle = fence->handle;
+    arg.type = flush_type;
+    arg.flags = flags;
+    arg.op = drm_fence_wait;
+    do {
+	ret = ioctl(fd, DRM_IOCTL_FENCE, &arg);
+    } while (ret != 0 && errno == EAGAIN);
+
+    if (ret)
+	return -errno;
+
+    fence->class = arg.class;
+    fence->type = arg.type;
+    fence->signaled = arg.signaled;
+    return 0;
+}    
+
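+/*
+ * Illustrative usage sketch (not part of the original patch): a typical
+ * create/emit/wait cycle with the fence calls above. Error handling is
+ * omitted and the fence type value is an assumption; real types are
+ * driver-specific.
+ *
+ *     drmFence fence;
+ *
+ *     drmFenceCreate(fd, DRM_FENCE_FLAG_EMIT, 0, DRM_FENCE_TYPE_EXE, &fence);
+ *     ... queue rendering commands ...
+ *     drmFenceWait(fd, DRM_FENCE_FLAG_WAIT_LAZY, &fence, 0);
+ *     drmFenceDestroy(fd, &fence);
+ */
+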
+static int drmAdjustListNodes(drmBOList *list)
+{
+    drmBONode *node;
+    drmMMListHead *l;
+    int ret = 0;
+
+    while(list->numCurrent < list->numTarget) {
+	node = (drmBONode *) malloc(sizeof(*node));
+	if (!node) {
+	    ret = -ENOMEM;
+	    break;
+	}
+	list->numCurrent++;
+	DRMLISTADD(&node->head, &list->free);
+    }
+
+    while(list->numCurrent > list->numTarget) {
+	l = list->free.next;
+	if (l == &list->free)
+	    break;
+	DRMLISTDEL(l);
+	node = DRMLISTENTRY(drmBONode, l, head);
+	free(node);
+	list->numCurrent--;
+    }
+    return ret;
+}
+
+void drmBOFreeList(drmBOList *list)
+{
+    drmBONode *node;
+    drmMMListHead *l;
+
+    l = list->list.next;
+    while(l != &list->list) {
+	DRMLISTDEL(l);
+	node = DRMLISTENTRY(drmBONode, l, head);
+	free(node);
+	l = list->list.next;
+	list->numCurrent--;
+	list->numOnList--;
+    }
+
+    l = list->free.next;
+    while(l != &list->free) {
+	DRMLISTDEL(l);
+	node = DRMLISTENTRY(drmBONode, l, head);
+	free(node);
+	l = list->free.next;
+	list->numCurrent--;
+    }
+}
+	
+int drmBOResetList(drmBOList *list)
+{
+    drmMMListHead *l;
+    int ret;
+
+    ret = drmAdjustListNodes(list);
+    if (ret)
+	return ret;
+
+    l = list->list.next;
+    while(l != &list->list) {
+	DRMLISTDEL(l);
+	DRMLISTADD(l, &list->free);
+	list->numOnList--;
+	l = list->list.next;
+    }
+    return drmAdjustListNodes(list);
+}
+	
+static drmBONode *drmAddListItem(drmBOList *list, drmBO *item, 
+				 unsigned long arg0,
+				 unsigned long arg1)
+{
+    drmBONode *node;
+    drmMMListHead *l;
+
+    l = list->free.next;
+    if (l == &list->free) {
+	node = (drmBONode *) malloc(sizeof(*node));
+	if (!node) {
+	    return NULL;
+	}
+	list->numCurrent++;
+    } else {
+	DRMLISTDEL(l);
+	node = DRMLISTENTRY(drmBONode, l, head);
+    }
+    node->buf = item;
+    node->arg0 = arg0;
+    node->arg1 = arg1;
+    DRMLISTADD(&node->head, &list->list);
+    list->numOnList++;
+    return node;
+}
+     	
+void *drmBOListIterator(drmBOList *list)
+{
+    void *ret = list->list.next;
+
+    if (ret == &list->list)
+	return NULL;
+    return ret;
+}
+
+void *drmBOListNext(drmBOList *list, void *iterator)
+{
+    void *ret;
+
+    drmMMListHead *l = (drmMMListHead *) iterator;
+    ret = l->next;
+    if (ret == &list->list)
+	return NULL;
+    return ret;
+}
+
+drmBO *drmBOListBuf(void *iterator)
+{
+    drmBONode *node;
+    drmMMListHead *l = (drmMMListHead *) iterator;
+    node = DRMLISTENTRY(drmBONode, l, head);
+    
+    return node->buf;
+}
+
+
+int drmBOCreateList(int numTarget, drmBOList *list)
+{
+    DRMINITLISTHEAD(&list->list);
+    DRMINITLISTHEAD(&list->free);
+    list->numTarget = numTarget;
+    list->numCurrent = 0;
+    list->numOnList = 0;
+    return drmAdjustListNodes(list);
+}
+
+static void drmBOCopyReply(const drm_bo_arg_reply_t *rep, 
+			   drmBO *buf)
+{
+    buf->handle = rep->handle;
+    buf->flags = rep->flags;
+    buf->size = rep->size;
+    buf->offset = rep->offset;
+    buf->mapHandle = rep->arg_handle;
+    buf->mask = rep->mask;
+    buf->start = rep->buffer_start;
+    buf->fenceFlags = rep->fence_flags;
+    buf->replyFlags = rep->rep_flags;
+    buf->pageAlignment = rep->page_alignment;
+}
+    
+    
+
+int drmBOCreate(int fd, unsigned long start, unsigned long size, 
+		unsigned pageAlignment, void *user_buffer, drm_bo_type_t type, 
+		unsigned mask,
+		unsigned hint, drmBO *buf)
+{
+    drm_bo_arg_t arg;
+    drm_bo_arg_request_t *req = &arg.d.req;
+    drm_bo_arg_reply_t *rep = &arg.d.rep;
+    int ret;
+
+    memset(buf, 0, sizeof(*buf));
+    memset(&arg, 0, sizeof(arg));
+    req->mask = mask;
+    req->hint = hint;
+    req->size = size;
+    req->type = type;
+    req->page_alignment = pageAlignment;
+
+    buf->virtual = NULL;
+
+    switch(type) {
+    case drm_bo_type_dc:
+        req->buffer_start = start;
+	break;
+    case drm_bo_type_user:
+	req->buffer_start = (unsigned long) user_buffer;
+	buf->virtual = user_buffer;
+	break;
+    case drm_bo_type_fake:
+        req->buffer_start = start;
+	break;
+    default:
+	return -EINVAL;
+    }
+    req->op = drm_bo_create;
+
+    do {
+	ret = ioctl(fd, DRM_IOCTL_BUFOBJ, &arg);
+    } while (ret != 0 && errno == EAGAIN);
+
+    if (ret)
+	return -errno;
+    if (!arg.handled) {
+	return -EFAULT;
+    }
+    if (rep->ret) {
+        fprintf(stderr, "Error %d\n", rep->ret);
+	return rep->ret;
+    }
+    
+    drmBOCopyReply(rep, buf);
+    buf->mapVirtual = NULL;
+    buf->mapCount = 0;
+
+    return 0;
+}
+
+int drmBODestroy(int fd, drmBO *buf)
+{
+    drm_bo_arg_t arg;
+    drm_bo_arg_request_t *req = &arg.d.req;
+    drm_bo_arg_reply_t *rep = &arg.d.rep;
+    
+    if (buf->mapVirtual && (buf->type != drm_bo_type_fake)) {
+	(void) drmUnmap(buf->mapVirtual, buf->start + buf->size);
+	buf->mapVirtual = NULL;
+	buf->virtual = NULL;
+    }
+
+    memset(&arg, 0, sizeof(arg));
+    req->handle = buf->handle;
+    req->op = drm_bo_destroy;
+
+    if (ioctl(fd, DRM_IOCTL_BUFOBJ, &arg))
+	return -errno;
+    if (!arg.handled) {
+	return -EFAULT;
+    }
+    if (rep->ret) {
+	return rep->ret;
+    }
+
+    buf->handle = 0;
+    return 0;
+}
+ 
+int drmBOReference(int fd, unsigned handle, drmBO *buf)
+{
+
+    drm_bo_arg_t arg;
+    drm_bo_arg_request_t *req = &arg.d.req;
+    drm_bo_arg_reply_t *rep = &arg.d.rep;
+    
+    memset(&arg, 0, sizeof(arg));
+    req->handle = handle;
+    req->op = drm_bo_reference;
+    
+    if (ioctl(fd, DRM_IOCTL_BUFOBJ, &arg))
+	return -errno;
+    if (!arg.handled) {
+	return -EFAULT;
+    }
+    if (rep->ret) {
+	return rep->ret;
+    }
+
+    drmBOCopyReply(rep, buf);
+    buf->type = drm_bo_type_dc;
+    buf->mapVirtual = NULL;
+    buf->mapCount = 0;
+    buf->virtual = NULL;
+
+    return 0;
+}
+
+int drmBOUnReference(int fd, drmBO *buf)
+{
+    drm_bo_arg_t arg;
+    drm_bo_arg_request_t *req = &arg.d.req;
+    drm_bo_arg_reply_t *rep = &arg.d.rep;
+    
+
+    if (buf->mapVirtual && (buf->type != drm_bo_type_fake)) {
+	(void) munmap(buf->mapVirtual, buf->start + buf->size);
+	buf->mapVirtual = NULL;
+	buf->virtual = NULL;
+    }
+
+    memset(&arg, 0, sizeof(arg));
+    req->handle = buf->handle;
+    req->op = drm_bo_unreference;
+
+    if (ioctl(fd, DRM_IOCTL_BUFOBJ, &arg))
+	return -errno;
+    if (!arg.handled) {
+	return -EFAULT;
+    }
+    if (rep->ret) {
+	return rep->ret;
+    }
+
+    buf->handle = 0;
+    return 0;
+}   
+
+/*
+ * Flags can be DRM_BO_FLAG_READ and DRM_BO_FLAG_WRITE, or'ed together.
+ * The hint can currently be DRM_BO_HINT_DONT_BLOCK, which makes the
+ * call return -EBUSY if it can't immediately honor the mapping request.
+ */
+
+int drmBOMap(int fd, drmBO *buf, unsigned mapFlags, unsigned mapHint,
+	     void **address)
+{
+
+    drm_bo_arg_t arg;
+    drm_bo_arg_request_t *req = &arg.d.req;
+    drm_bo_arg_reply_t *rep = &arg.d.rep;
+    int ret = 0;
+
+    /*
+     * Make sure we have a virtual address of the buffer.
+     */
+
+    if (!buf->virtual && buf->type != drm_bo_type_fake) {
+	drmAddress virtual;
+	virtual = mmap(0, buf->size + buf->start, 
+		       PROT_READ | PROT_WRITE, MAP_SHARED,
+		       fd, buf->mapHandle);
+	if (virtual == MAP_FAILED) {
+	    ret = -errno;
+	}
+	if (ret) 
+	    return ret;
+	buf->mapVirtual = virtual;
+	buf->virtual = ((char *) virtual) + buf->start;
+    }
+
+    memset(&arg, 0, sizeof(arg));
+    req->handle = buf->handle;
+    req->mask = mapFlags;
+    req->hint = mapHint;
+    req->op = drm_bo_map;
+
+    /*
+     * May hang if the buffer object is busy.
+     * This IOCTL synchronizes the buffer.
+     */
+    
+    do {
+	ret = ioctl(fd, DRM_IOCTL_BUFOBJ, &arg);
+    } while (ret != 0 && errno == EAGAIN);
+
+    if (ret) 
+	return ret;
+    if (!arg.handled) 
+	return -EFAULT;
+    if (rep->ret)
+	return rep->ret;
+
+    drmBOCopyReply(rep, buf);	
+    buf->mapFlags = mapFlags;
+    ++buf->mapCount;
+    *address = buf->virtual;
+
+    return 0;
+}
+
+int drmBOUnmap(int fd, drmBO *buf)
+{
+    drm_bo_arg_t arg;
+    drm_bo_arg_request_t *req = &arg.d.req;
+    drm_bo_arg_reply_t *rep = &arg.d.rep;
+
+	
+    memset(&arg, 0, sizeof(arg));
+    req->handle = buf->handle;
+    req->op = drm_bo_unmap;
+
+    if (ioctl(fd, DRM_IOCTL_BUFOBJ, &arg)) {
+	return -errno;
+    }
+    if (!arg.handled) 
+        return -EFAULT;
+    if (rep->ret)
+	return rep->ret;
+
+    return 0;
+}
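+
+/*
+ * Illustrative usage sketch (not part of the original patch): mapping a
+ * buffer object for CPU access and unmapping it again. Error handling is
+ * omitted.
+ *
+ *     void *virtual;
+ *
+ *     if (drmBOMap(fd, &buf, DRM_BO_FLAG_WRITE, DRM_BO_HINT_DONT_BLOCK,
+ *                  &virtual) == 0) {
+ *         memcpy(virtual, data, buf.size);     (CPU writes while mapped)
+ *         drmBOUnmap(fd, &buf);
+ *     }
+ */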
+    
+int drmBOValidate(int fd, drmBO *buf, unsigned flags, unsigned mask, 
+		  unsigned hint)
+{
+    drm_bo_arg_t arg;
+    drm_bo_arg_request_t *req = &arg.d.req;
+    drm_bo_arg_reply_t *rep = &arg.d.rep;
+    int ret = 0;
+
+    memset(&arg, 0, sizeof(arg));
+    req->handle = buf->handle;
+    req->mask = flags;
+    req->hint = hint;
+    req->arg_handle = mask; /* Encode mask in the arg_handle field :/ */
+    req->op = drm_bo_validate;
+
+    do{
+	ret = ioctl(fd, DRM_IOCTL_BUFOBJ, &arg);
+    } while (ret && errno == EAGAIN);
+    
+    if (ret) 
+	return ret;
+    if (!arg.handled)
+	return -EFAULT;
+    if (rep->ret)
+	return rep->ret;
+
+    drmBOCopyReply(rep, buf);
+    return 0;
+}
+	    
+
+int drmBOFence(int fd, drmBO *buf, unsigned flags, unsigned fenceHandle)
+{
+    drm_bo_arg_t arg;
+    drm_bo_arg_request_t *req = &arg.d.req;
+    drm_bo_arg_reply_t *rep = &arg.d.rep;
+    int ret = 0;
+
+    memset(&arg, 0, sizeof(arg));
+    req->handle = buf->handle;
+    req->mask = flags;
+    req->arg_handle = fenceHandle;
+    req->op = drm_bo_fence;
+
+    ret = ioctl(fd, DRM_IOCTL_BUFOBJ, &arg);
+    
+    if (ret) 
+	return ret;
+    if (!arg.handled)
+	return -EFAULT;
+    if (rep->ret)
+	return rep->ret;
+    return 0;
+}
+
+int drmBOInfo(int fd, drmBO *buf)
+{
+    drm_bo_arg_t arg;
+    drm_bo_arg_request_t *req = &arg.d.req;
+    drm_bo_arg_reply_t *rep = &arg.d.rep;
+    int ret = 0;
+
+    memset(&arg, 0, sizeof(arg));
+    req->handle = buf->handle;
+    req->op = drm_bo_info;
+
+    ret = ioctl(fd, DRM_IOCTL_BUFOBJ, &arg);
+    
+    if (ret) 
+	return ret;
+    if (!arg.handled)
+	return -EFAULT;
+    if (rep->ret)
+	return rep->ret;
+    drmBOCopyReply(rep, buf);
+    return 0;
+}
+
+int drmBOWaitIdle(int fd, drmBO *buf, unsigned hint)
+{
+    drm_bo_arg_t arg;
+    drm_bo_arg_request_t *req = &arg.d.req;
+    drm_bo_arg_reply_t *rep = &arg.d.rep;
+    int ret = 0;
+
+    if ((buf->flags & DRM_BO_FLAG_SHAREABLE) ||
+	(buf->replyFlags & DRM_BO_REP_BUSY)) {
+        memset(&arg, 0, sizeof(arg));
+	req->handle = buf->handle;
+	req->op = drm_bo_wait_idle;
+	req->hint = hint;
+
+	do {
+	    ret = ioctl(fd, DRM_IOCTL_BUFOBJ, &arg);
+	} while (ret && errno == EAGAIN);
+
+	if (ret) 
+	    return ret;
+	if (!arg.handled)
+	    return -EFAULT;
+	if (rep->ret)
+	    return rep->ret;
+	drmBOCopyReply(rep, buf);
+    }
+    return 0;
+}
+	
+int drmBOBusy(int fd, drmBO *buf, int *busy)
+{
+    if (!(buf->flags & DRM_BO_FLAG_SHAREABLE) &&
+	!(buf->replyFlags & DRM_BO_REP_BUSY)) {
+	*busy = 0;
+	return 0;
+    } else {
+	int ret = drmBOInfo(fd, buf);
+	if (ret)
+	    return ret;
+	*busy = (buf->replyFlags & DRM_BO_REP_BUSY);
+	return 0;
+    }
+}
+    
+    
+int drmAddValidateItem(drmBOList *list, drmBO *buf, unsigned flags, 
+		       unsigned mask,
+		       int *newItem)
+{
+    drmBONode *node, *cur;
+    drmMMListHead *l;
+
+    *newItem = 0;
+    cur = NULL;
+
+    for (l = list->list.next; l != &list->list; l = l->next) {
+	node = DRMLISTENTRY(drmBONode, l, head);
+	if (node->buf == buf) {
+	    cur = node;
+	    break;
+	}
+    }
+    if (!cur) {
+	cur = drmAddListItem(list, buf, flags, mask);
+	if (!cur) {
+	    drmMsg("Out of memory creating validate list node.\n");
+	    return -ENOMEM;
+	}
+	*newItem = 1;
+	cur->arg0 = flags;
+	cur->arg1 = mask;
+    } else {
+	unsigned memMask = (cur->arg1 | mask) & DRM_BO_MASK_MEM;
+	unsigned memFlags = cur->arg0 & flags & memMask;
+	
+	if (!memFlags) {
+	    drmMsg("Incompatible memory location requests "
+		   "on validate list.\n");
+	    drmMsg("Previous flag: 0x%08lx, mask: 0x%08lx\n",
+		   cur->arg0, cur->arg1);
+	    drmMsg("Current flag: 0x%08lx, mask: 0x%08lx\n",
+		   flags, mask);
+	    return -EINVAL;
+	}
+	if (mask & cur->arg1 & ~DRM_BO_MASK_MEM  & (cur->arg0 ^ flags)) {
+	    drmMsg("Incompatible buffer flag requests "
+		   "on validate list.\n");
+	    drmMsg("Previous flag: 0x%08lx, mask: 0x%08lx\n",
+		   cur->arg0, cur->arg1);
+	    drmMsg("Current flag: 0x%08lx, mask: 0x%08lx\n",
+		   flags, mask);
+	    return -EINVAL;
+	}
+	cur->arg1 |= mask;
+	cur->arg0 = memFlags | ((cur->arg0 | flags) & 
+				cur->arg1 & ~DRM_BO_MASK_MEM);	
+    }
+    return 0;
+}
+
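+/*
+ * Worked example (illustrative, not part of the original patch): if a buffer
+ * was first added with flags = DRM_BO_FLAG_MEM_TT and is added again with
+ * flags = DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_MEM_VRAM (mask = DRM_BO_MASK_MEM
+ * in both cases), memFlags reduces to DRM_BO_FLAG_MEM_TT and the combined
+ * entry simply narrows to TT. If the second request had asked for
+ * DRM_BO_FLAG_MEM_VRAM only, memFlags would be zero and -EINVAL is returned.
+ */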
+
+int drmBOValidateList(int fd, drmBOList *list)
+{
+   
+  drmBONode *node;
+  drmMMListHead *l;
+  drm_bo_arg_t *arg, *first;
+  drm_bo_arg_request_t *req;
+  drm_bo_arg_reply_t *rep;
+  drm_u64_t *prevNext = NULL;
+  drmBO *buf;
+  int ret;
+
+  first = NULL;
+
+  for (l = list->list.next; l != &list->list; l = l->next) {
+      node = DRMLISTENTRY(drmBONode, l, head);
+
+      arg = &node->bo_arg;
+      req = &arg->d.req;
+
+      if (!first)
+	  first = arg;
+
+      if (prevNext)
+	  *prevNext = (unsigned long) arg;
+
+      memset(arg, 0, sizeof(*arg));
+      prevNext = &arg->next;
+      req->handle = node->buf->handle;
+      req->op = drm_bo_validate;
+      req->mask = node->arg0;
+      req->hint = 0;
+      req->arg_handle = node->arg1;
+  }
+  
+  if (!first) 
+      return 0;
+
+  do{
+      ret = ioctl(fd, DRM_IOCTL_BUFOBJ, first);
+  } while (ret && errno == EAGAIN);
+
+
+  if (ret)
+      return -errno;
+  
+  for (l = list->list.next; l != &list->list; l = l->next) {
+      node = DRMLISTENTRY(drmBONode, l, head);
+      arg = &node->bo_arg;
+      rep = &arg->d.rep;
+      
+      if (!arg->handled) {
+	  drmMsg("Unhandled request\n");
+	  return -EFAULT;
+      }
+      if (rep->ret)
+	  return rep->ret;
+
+      buf = node->buf;
+      drmBOCopyReply(rep, buf);
+  }
+
+  return 0;
+}
+	  
+
+int drmBOFenceList(int fd, drmBOList *list, unsigned fenceHandle)
+{
+   
+  drmBONode *node;
+  drmMMListHead *l;
+  drm_bo_arg_t *arg, *first;
+  drm_bo_arg_request_t *req;
+  drm_bo_arg_reply_t *rep;
+  drm_u64_t *prevNext = NULL;
+  drmBO *buf;
+  unsigned fence_flags;
+  int ret;
+
+  first = NULL;
+
+  for (l = list->list.next; l != &list->list; l = l->next) {
+      node = DRMLISTENTRY(drmBONode, l, head);
+
+      arg = &node->bo_arg;
+      req = &arg->d.req;
+
+      if (!first)
+	  first = arg;
+
+      if (prevNext)
+	  *prevNext = (unsigned long) arg;
+
+      memset(arg, 0, sizeof(*arg));
+      prevNext = &arg->next;
+      req->handle = node->buf->handle;
+      req->op = drm_bo_fence;
+      req->mask = node->arg0;
+      req->arg_handle = fenceHandle;
+  }
+  
+  if (!first) 
+      return 0;
+
+  ret = ioctl(fd, DRM_IOCTL_BUFOBJ, first);
+
+  if (ret)
+      return -errno;
+  
+  for (l = list->list.next; l != &list->list; l = l->next) {
+      node = DRMLISTENTRY(drmBONode, l, head);
+
+      arg = &node->bo_arg;
+      rep = &arg->d.rep;
+      
+      if (!arg->handled)
+	  return -EFAULT;
+      if (rep->ret)
+	  return rep->ret;
+      drmBOCopyReply(rep, node->buf);
+  }
+
+  return 0;
+}
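+
+/*
+ * Illustrative workflow sketch (not part of the original patch): batching
+ * validation and fencing with the list helpers above. Error handling and
+ * flag values are simplified.
+ *
+ *     drmBOList list;
+ *     drmFence fence;
+ *     int newItem;
+ *
+ *     drmBOCreateList(16, &list);
+ *     drmAddValidateItem(&list, &buf, DRM_BO_FLAG_MEM_TT,
+ *                        DRM_BO_MASK_MEM, &newItem);
+ *     drmBOValidateList(fd, &list);
+ *     ... submit rendering that uses the listed buffers ...
+ *     drmFenceBuffers(fd, DRM_FENCE_FLAG_SHAREABLE, &fence);
+ *     drmBOFenceList(fd, &list, fence.handle);
+ *     drmBOResetList(&list);                 (or drmBOFreeList() when done)
+ */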
+
+int drmMMInit(int fd, unsigned long pOffset, unsigned long pSize,
+	      unsigned memType)
+{
+    drm_mm_init_arg_t arg;
+    
+    memset(&arg, 0, sizeof(arg));
+    arg.req.op = mm_init;
+    arg.req.p_offset = pOffset;
+    arg.req.p_size = pSize;
+    arg.req.mem_type = memType;
+
+    if (ioctl(fd, DRM_IOCTL_MM_INIT, &arg))
+	return -errno;
+    
+    return 0;	
+}
+
+int drmMMTakedown(int fd, unsigned memType)
+{
+    drm_mm_init_arg_t arg;
+
+
+    memset(&arg, 0, sizeof(arg));
+    arg.req.op = mm_takedown;
+    arg.req.mem_type = memType;
+
+    if (ioctl(fd, DRM_IOCTL_MM_INIT, &arg))
+	return -errno;
+    
+    return 0;	
+}
+
+int drmMMLock(int fd, unsigned memType)
+{
+    drm_mm_init_arg_t arg;
+    int ret;
+
+    memset(&arg, 0, sizeof(arg));
+    arg.req.op = mm_lock;
+    arg.req.mem_type = memType;
+
+    do{
+	ret = ioctl(fd, DRM_IOCTL_MM_INIT, &arg);
+    } while (ret && errno == EAGAIN);
+    
+    return ret;	
+}
+
+int drmMMUnlock(int fd, unsigned memType)
+{
+    drm_mm_init_arg_t arg;
+    int ret;
+
+    memset(&arg, 0, sizeof(arg));
+    arg.req.op = mm_unlock;
+    arg.req.mem_type = memType;
+
+    do{
+	ret = ioctl(fd, DRM_IOCTL_MM_INIT, &arg);
+    } while (ret && errno == EAGAIN);
+    
+    return ret;	
+}
diff --git a/libdrm/xf86drm.h b/libdrm/xf86drm.h
index d58baa7..86ee7d3 100644
--- a/libdrm/xf86drm.h
+++ b/libdrm/xf86drm.h
@@ -283,7 +283,6 @@
 	int drm_dd_minor;
 } drmSetVersion, *drmSetVersionPtr;
 
-
 #define __drm_dummy_lock(lock) (*(__volatile__ unsigned int *)lock)
 
 #define DRM_LOCK_HELD  0x80000000U /**< Hardware lock is held */
@@ -487,6 +486,8 @@
             }                                                          \
 	} while(0)
 
+
+
 /* General user-level programmer's API: unprivileged */
 extern int           drmAvailable(void);
 extern int           drmOpen(const char *name, const char *busid);
@@ -636,4 +637,6 @@
 				 unsigned long *prev_key, void **prev_value,
 				 unsigned long *next_key, void **next_value);
 
+#include "xf86mm.h"
+
 #endif
diff --git a/libdrm/xf86mm.h b/libdrm/xf86mm.h
new file mode 100644
index 0000000..bd0d281
--- /dev/null
+++ b/libdrm/xf86mm.h
@@ -0,0 +1,209 @@
+/**************************************************************************
+ * 
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND. USA.
+ * All Rights Reserved.
+ * 
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ * 
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, 
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ * 
+ * 
+ **************************************************************************/
+
+#ifndef _XF86MM_H_
+#define _XF86MM_H_
+#include <stddef.h>
+#include "drm.h"
+
+/*
+ * Note on multithreaded applications using this interface.
+ * Libdrm is not threadsafe, so common buffer, TTM, and fence objects need to
+ * be protected using an external mutex.
+ *
+ * Note: Do not protect the following functions, as doing so may lead to
+ * deadlocks: drmBOUnmap(), drmFenceBuffers().
+ * The kernel synchronizes and refcounts buffer maps.
+ * User space only needs to refcount object usage within the same application.
+ */
+
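+/*
+ * Illustrative sketch (not part of the original header): guarding libdrm
+ * buffer and fence calls with an application-level mutex, as recommended
+ * above. The pthread usage is an assumption about the application, not a
+ * libdrm API.
+ *
+ *     pthread_mutex_lock(&app_bo_mutex);
+ *     drmBOValidate(fd, &buf, flags, mask, hint);
+ *     pthread_mutex_unlock(&app_bo_mutex);
+ */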
+
+/*
+ * List macros heavily inspired by the Linux kernel
+ * list handling. No list looping yet.
+ */
+
+typedef struct _drmMMListHead
+{
+    struct _drmMMListHead *prev;
+    struct _drmMMListHead *next;
+} drmMMListHead;
+
+#define DRMINITLISTHEAD(__item)		       \
+  do{					       \
+    (__item)->prev = (__item);		       \
+    (__item)->next = (__item);		       \
+  } while (0)
+
+#define DRMLISTADD(__item, __list)			\
+  do {						\
+    (__item)->prev = (__list);			\
+    (__item)->next = (__list)->next;		\
+    (__list)->next->prev = (__item);		\
+    (__list)->next = (__item);			\
+  } while (0)
+
+#define DRMLISTADDTAIL(__item, __list)		\
+  do {						\
+    (__item)->next = (__list);			\
+    (__item)->prev = (__list)->prev;		\
+    (__list)->prev->next = (__item);		\
+    (__list)->prev = (__item);			\
+  } while(0)
+
+#define DRMLISTDEL(__item)			\
+  do {						\
+    (__item)->prev->next = (__item)->next;	\
+    (__item)->next->prev = (__item)->prev;	\
+  } while(0)
+
+#define DRMLISTDELINIT(__item)			\
+  do {						\
+    (__item)->prev->next = (__item)->next;	\
+    (__item)->next->prev = (__item)->prev;	\
+    (__item)->next = (__item);			\
+    (__item)->prev = (__item);			\
+  } while(0)
+
+#define DRMLISTENTRY(__type, __item, __field)   \
+    ((__type *)(((char *) (__item)) - offsetof(__type, __field)))
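+
+/*
+ * Illustrative sketch (not part of the original header): typical use of the
+ * list macros. Since there are no looping macros yet, iteration is written
+ * out by hand, as in xf86drm.c:
+ *
+ *     drmMMListHead head, *l;
+ *     drmBONode *node;
+ *
+ *     DRMINITLISTHEAD(&head);
+ *     DRMLISTADD(&node->head, &head);              (insert node at the front)
+ *     for (l = head.next; l != &head; l = l->next) {
+ *         node = DRMLISTENTRY(drmBONode, l, head); (container_of-style lookup)
+ *     }
+ */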
+
+typedef struct _drmFence{
+        unsigned handle;
+        int class;
+        unsigned type; 
+        unsigned flags;
+        unsigned signaled;
+        unsigned pad[4]; /* for future expansion */
+} drmFence;
+
+typedef struct _drmBO{
+    drm_bo_type_t type;
+    unsigned handle;
+    drm_u64_t mapHandle;
+    unsigned flags;
+    unsigned mask;
+    unsigned mapFlags;
+    unsigned long size;
+    unsigned long offset;
+    unsigned long start;
+    unsigned replyFlags;
+    unsigned fenceFlags;
+    unsigned pageAlignment;
+    void *virtual;
+    void *mapVirtual;
+    int mapCount;
+    unsigned pad[8];     /* for future expansion */
+} drmBO;
+
+
+typedef struct _drmBONode {
+    drmMMListHead head;
+    drmBO *buf;
+    drm_bo_arg_t bo_arg;
+    unsigned long arg0;
+    unsigned long arg1;
+} drmBONode;
+
+typedef struct _drmBOList {
+    unsigned numTarget;
+    unsigned numCurrent;
+    unsigned numOnList;
+    drmMMListHead list;
+    drmMMListHead free;
+} drmBOList;
+
+/* Fencing */
+
+extern int           drmFenceCreate(int fd, unsigned flags, int class,
+				    unsigned type, 
+				    drmFence *fence);
+extern int           drmFenceDestroy(int fd, const drmFence *fence);
+extern int           drmFenceReference(int fd, unsigned handle, drmFence *fence);
+extern int           drmFenceUnreference(int fd, const drmFence *fence);
+extern int           drmFenceFlush(int fd, drmFence *fence, unsigned flush_type);
+extern int           drmFenceSignaled(int fd, drmFence *fence, 
+				      unsigned fenceType, int *signaled);
+extern int           drmFenceWait(int fd, unsigned flags, drmFence *fence, 
+				  unsigned flush_type);
+extern int           drmFenceEmit(int fd, unsigned flags, drmFence *fence, 
+				  unsigned emit_type);
+extern int           drmFenceBuffers(int fd, unsigned flags, drmFence *fence);
+
+
+/*
+ * Buffer object list functions.
+ */
+
+extern void drmBOFreeList(drmBOList *list);
+extern int drmBOResetList(drmBOList *list);
+extern void *drmBOListIterator(drmBOList *list);
+extern void *drmBOListNext(drmBOList *list, void *iterator);
+extern drmBO *drmBOListBuf(void *iterator);
+extern int drmBOCreateList(int numTarget, drmBOList *list);
+
+/*
+ * Buffer object functions.
+ */
+
+extern int drmBOCreate(int fd, unsigned long start, unsigned long size,
+		       unsigned pageAlignment,void *user_buffer, 
+		       drm_bo_type_t type, unsigned mask,
+		       unsigned hint, drmBO *buf);
+extern int drmBODestroy(int fd, drmBO *buf);
+extern int drmBOReference(int fd, unsigned handle, drmBO *buf);
+extern int drmBOUnReference(int fd, drmBO *buf);
+extern int drmBOMap(int fd, drmBO *buf, unsigned mapFlags, unsigned mapHint,
+		    void **address);
+extern int drmBOUnmap(int fd, drmBO *buf);
+extern int drmBOValidate(int fd, drmBO *buf, unsigned flags, unsigned mask, 
+			 unsigned hint);
+extern int drmBOFence(int fd, drmBO *buf, unsigned flags, unsigned fenceHandle);
+extern int drmBOInfo(int fd, drmBO *buf);
+extern int drmBOBusy(int fd, drmBO *buf, int *busy);
+
+
+extern int drmAddValidateItem(drmBOList *list, drmBO *buf, unsigned flags, 
+		       unsigned mask,
+		       int *newItem);
+extern int drmBOValidateList(int fd, drmBOList *list);
+extern int drmBOFenceList(int fd, drmBOList *list, unsigned fenceHandle);
+extern int drmBOWaitIdle(int fd, drmBO *buf, unsigned hint);
+
+/*
+ * Initialization functions.
+ */
+
+extern int drmMMInit(int fd, unsigned long pOffset, unsigned long pSize,
+		     unsigned memType);
+extern int drmMMTakedown(int fd, unsigned memType);
+extern int drmMMLock(int fd, unsigned memType);
+extern int drmMMUnlock(int fd, unsigned memType);
+
+
+#endif
diff --git a/linux-core/Makefile.kernel b/linux-core/Makefile.kernel
index 71605de..4b7c90b 100644
--- a/linux-core/Makefile.kernel
+++ b/linux-core/Makefile.kernel
@@ -12,13 +12,15 @@
 		drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
 		drm_sysfs.o drm_pci.o drm_agpsupport.o drm_scatter.o \
 		drm_memory_debug.o ati_pcigart.o drm_sman.o \
-		drm_hashtab.o drm_mm.o
+		drm_hashtab.o drm_mm.o drm_object.o drm_compat.o \
+	        drm_fence.o drm_ttm.o drm_bo.o
 tdfx-objs   := tdfx_drv.o
 r128-objs   := r128_drv.o r128_cce.o r128_state.o r128_irq.o
 mga-objs    := mga_drv.o mga_dma.o mga_state.o mga_warp.o mga_irq.o
 i810-objs   := i810_drv.o i810_dma.o
 i830-objs   := i830_drv.o i830_dma.o i830_irq.o
-i915-objs   := i915_drv.o i915_dma.o i915_irq.o i915_mem.o
+i915-objs   := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_fence.o \
+		i915_buffer.o
 nouveau-objs := nouveau_drv.o nouveau_state.o nouveau_fifo.o nouveau_mem.o nouveau_object.o nouveau_irq.o
 radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o r300_cmdbuf.o
 sis-objs    := sis_drv.o sis_mm.o
diff --git a/linux-core/drmP.h b/linux-core/drmP.h
index 2bbec70..d02184c 100644
--- a/linux-core/drmP.h
+++ b/linux-core/drmP.h
@@ -41,7 +41,6 @@
  * can build the DRM (part of PI DRI). 4/21/2000 S + B */
 #include <asm/current.h>
 #endif				/* __alpha__ */
-#include <linux/config.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/miscdevice.h>
@@ -84,6 +83,7 @@
 #include <linux/poll.h>
 #include <asm/pgalloc.h>
 #include "drm.h"
+#include <linux/slab.h>
 
 #define __OS_HAS_AGP (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE)))
 #define __OS_HAS_MTRR (defined(CONFIG_MTRR))
@@ -155,9 +155,18 @@
 #define DRM_MEM_CTXLIST   21
 #define DRM_MEM_MM        22
 #define DRM_MEM_HASHTAB   23
+#define DRM_MEM_OBJECTS   24
+#define DRM_MEM_FENCE     25
+#define DRM_MEM_TTM       26
+#define DRM_MEM_BUFOBJ    27
 
 #define DRM_MAX_CTXBITMAP (PAGE_SIZE * 8)
 #define DRM_MAP_HASH_OFFSET 0x10000000
+#define DRM_MAP_HASH_ORDER 12
+#define DRM_OBJECT_HASH_ORDER 12
+#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
+#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
+#define DRM_MM_INIT_MAX_PAGES 256
 
 /*@}*/
 
@@ -388,6 +397,19 @@
 	drm_freelist_t freelist;
 } drm_buf_entry_t;
 
+/*
+ * This should be small enough to allow the use of kmalloc for hash tables
+ * instead of vmalloc.
+ */
+
+#define DRM_FILE_HASH_ORDER 8
+typedef enum{
+	_DRM_REF_USE=0,
+	_DRM_REF_TYPE1,
+	_DRM_NO_REF_TYPES
+} drm_ref_t;
+
+
 /** File private data */
 typedef struct drm_file {
 	int authenticated;
@@ -402,6 +424,18 @@
 	struct drm_head *head;
 	int remove_auth_on_close;
 	unsigned long lock_count;
+	
+	/*
+	 * The user object hash table is global and resides in the
+	 * drm_device structure. We protect the lists and hash tables with the
+	 * device struct_mutex. A bit coarse-grained but probably the best 
+	 * option.
+	 */
+
+        struct list_head refd_objects;
+	struct list_head user_objects;
+
+        drm_open_hash_t refd_object_hash[_DRM_NO_REF_TYPES];
 	void *driver_priv;
 } drm_file_t;
 
@@ -503,6 +537,26 @@
 	drm_hw_lock_t *lock;
 } drm_sigdata_t;
 
+
+/* 
+ * Generic memory manager structs
+ */
+
+typedef struct drm_mm_node {
+	struct list_head fl_entry;
+	struct list_head ml_entry;
+	int free;
+	unsigned long start;
+	unsigned long size;
+        struct drm_mm *mm;
+	void *private;
+} drm_mm_node_t;
+
+typedef struct drm_mm {
+	drm_mm_node_t root_node;
+} drm_mm_t;
+
+
 /**
  * Mappings list
  */
@@ -510,7 +564,8 @@
 	struct list_head head;		/**< list head */
 	drm_hash_item_t hash;
 	drm_map_t *map;			/**< mapping */
-	unsigned int user_token;
+	drm_u64_t user_token;
+        drm_mm_node_t *file_offset_node;
 } drm_map_list_t;
 
 typedef drm_map_t drm_local_map_t;
@@ -543,22 +598,77 @@
 	drm_local_map_t mapping;
 } drm_ati_pcigart_info;
 
-/* 
- * Generic memory manager structs
+/*
+ * User space objects and their references.
  */
 
-typedef struct drm_mm_node {
-	struct list_head fl_entry;
-	struct list_head ml_entry;
-	int free;
-	unsigned long start;
-	unsigned long size;
-	void *private;
-} drm_mm_node_t;
+#define drm_user_object_entry(_ptr, _type, _member) container_of(_ptr, _type, _member)
 
-typedef struct drm_mm {
-	drm_mm_node_t root_node;
-} drm_mm_t;
+typedef enum {
+		drm_fence_type,
+		drm_buffer_type,
+		drm_ttm_type
+
+		/*
+		 * Add other user space object types here. 
+		 */
+
+} drm_object_type_t;
+
+
+
+
+/*
+ * A user object is a structure that helps the drm give out user handles
+ * to kernel internal objects and to keep track of these objects so that 
+ * they can be destroyed, for example when the user space process exits.
+ * Designed to be accessible using a user space 32-bit handle. 
+ */
+
+typedef struct drm_user_object{
+	drm_hash_item_t hash;
+	struct list_head list;
+	drm_object_type_t type;
+        atomic_t refcount;
+        int shareable;
+        drm_file_t *owner;
+	void (*ref_struct_locked) (drm_file_t *priv, struct drm_user_object *obj, 
+				   drm_ref_t ref_action); 
+	void (*unref)(drm_file_t *priv, struct drm_user_object *obj, 
+		      drm_ref_t unref_action);
+	void (*remove)(drm_file_t *priv, struct drm_user_object *obj);
+} drm_user_object_t;
+
+/*
+ * A ref object is a structure used to keep track of references to user
+ * objects so that they can be destroyed, for example when the user space
+ * process exits. Designed to be accessible using a pointer to the _user_
+ * object.
+ */
+
+
+typedef struct drm_ref_object {
+	drm_hash_item_t hash;
+	struct list_head list;
+	atomic_t refcount;
+	drm_ref_t unref_action;
+} drm_ref_object_t;
+
+
+#include "drm_ttm.h"
+
+/*
+ * buffer object driver
+ */
+
+typedef struct drm_bo_driver{
+	int cached[DRM_BO_MEM_TYPES];
+        drm_local_map_t *iomap[DRM_BO_MEM_TYPES];
+	drm_ttm_backend_t *(*create_ttm_backend_entry) 
+		(struct drm_device *dev);
+	int (*fence_type)(uint32_t flags, uint32_t *class, uint32_t *type);
+	int (*invalidate_caches)(struct drm_device *dev, uint32_t flags);
+} drm_bo_driver_t;
 
 
 /**
@@ -566,6 +676,7 @@
  * a family of cards. There will one drm_device for each card present
  * in this family
  */
+
 struct drm_device;
 struct drm_driver {
 	int (*load) (struct drm_device *, unsigned long flags);
@@ -612,6 +723,9 @@
 	unsigned long (*get_reg_ofs) (struct drm_device * dev);
 	void (*set_version) (struct drm_device * dev, drm_set_version_t * sv);
 
+        struct drm_fence_driver *fence_driver;
+	struct drm_bo_driver *bo_driver;
+        
 	int major;
 	int minor;
 	int patchlevel;
@@ -641,6 +755,71 @@
 	struct class_device *dev_class;
 } drm_head_t;
 
+typedef struct drm_cache {
+
+	/*
+	 * Memory caches
+	 */
+
+	kmem_cache_t *mm;
+	kmem_cache_t *fence_object;
+} drm_cache_t;
+
+
+
+typedef struct drm_fence_driver{
+	int no_types;
+	uint32_t wrap_diff;
+	uint32_t flush_diff;
+        uint32_t sequence_mask;
+        int lazy_capable;
+	int (*emit) (struct drm_device *dev, uint32_t flags,
+		     uint32_t *breadcrumb,
+		     uint32_t *native_type);
+	void (*poke_flush) (struct drm_device *dev);
+} drm_fence_driver_t;
+
+#define _DRM_FENCE_TYPE_EXE 0x00
+
+typedef struct drm_fence_manager{
+        int initialized;
+	rwlock_t lock;
+
+	/*
+	 * The list below should be maintained in sequence order and 
+	 * access is protected by the above spinlock.
+	 */
+
+	struct list_head ring;
+	struct list_head *fence_types[32];
+	volatile uint32_t pending_flush;
+	wait_queue_head_t fence_queue;
+	int pending_exe_flush;
+	uint32_t last_exe_flush;
+	uint32_t exe_flush_sequence;
+        atomic_t count;
+} drm_fence_manager_t;
+
+typedef struct drm_buffer_manager{
+	struct mutex init_mutex;
+	int nice_mode;
+	int initialized;
+        drm_file_t *last_to_validate;
+	int has_type[DRM_BO_MEM_TYPES];
+        int use_type[DRM_BO_MEM_TYPES];
+	drm_mm_t manager[DRM_BO_MEM_TYPES];
+	struct list_head lru[DRM_BO_MEM_TYPES];
+        struct list_head pinned[DRM_BO_MEM_TYPES];
+	struct list_head unfenced;
+	struct list_head ddestroy;
+        struct work_struct wq;
+        uint32_t fence_type;
+        unsigned long cur_pages;
+        atomic_t count;
+} drm_buffer_manager_t;
+
+
+
 /**
  * DRM device structure. This structure represent a complete card that
  * may contain multiple heads.
@@ -687,7 +866,11 @@
 	/*@{ */
 	drm_map_list_t *maplist;	/**< Linked list of regions */
 	int map_count;			/**< Number of mappable regions */
-	drm_open_hash_t map_hash;       /**< User token hash table for maps */
+        drm_open_hash_t map_hash;       /**< User token hash table for maps */
+        drm_mm_t offset_manager;        /**< User token manager */
+        drm_open_hash_t object_hash;    /**< User token hash table for objects */
+        struct address_space *dev_mapping;  /**< For unmap_mapping_range() */
+        struct page *ttm_dummy_page;
 
 	/** \name Context handle management */
 	/*@{ */
@@ -774,6 +957,9 @@
 	unsigned int agp_buffer_token;
 	drm_head_t primary;		/**< primary screen head */
 
+	drm_fence_manager_t fm;
+	drm_buffer_manager_t bm;
+  
 	/** \name Drawable information */
 	/*@{ */
 	spinlock_t drw_lock;
@@ -784,6 +970,75 @@
 	/*@} */
 } drm_device_t;
 
+#if __OS_HAS_AGP
+typedef struct drm_agp_ttm_priv {
+	DRM_AGP_MEM *mem;
+	struct agp_bridge_data *bridge;
+	unsigned alloc_type;
+	unsigned cached_type;
+	unsigned uncached_type;
+	int populated;
+} drm_agp_ttm_priv;
+#endif
+
+typedef struct drm_fence_object{
+	drm_user_object_t base;
+        atomic_t usage;
+
+	/*
+	 * The below three fields are protected by the fence manager spinlock.
+	 */
+
+	struct list_head ring;
+        int class;
+        uint32_t native_type;
+	uint32_t type;
+	uint32_t signaled;
+	uint32_t sequence;
+	uint32_t flush_mask;
+	uint32_t submitted_flush;
+} drm_fence_object_t;
+
+
+typedef struct drm_buffer_object{
+	drm_device_t *dev;
+	drm_user_object_t base;
+
+    /*
+     * If there is a possibility that the usage variable is zero,
+     * then dev->struct_mutex should be locked before incrementing it.
+     */
+
+	atomic_t usage;
+	drm_ttm_object_t *ttm_object;
+        drm_ttm_t *ttm;
+	unsigned long num_pages;
+        unsigned long buffer_start;
+        drm_bo_type_t type;
+        unsigned long offset;
+        uint32_t page_alignment;
+	atomic_t mapped;
+	uint32_t flags;
+	uint32_t mask;
+
+	drm_mm_node_t *node_ttm;    /* MM node for the ttm pages */
+	drm_mm_node_t *node_card;   /* MM node for on-card RAM */
+	struct list_head lru_ttm;   /* LRU for the ttm pages*/
+        struct list_head lru_card;  /* For memory types with on-card RAM */
+	struct list_head ddestroy;
+
+	uint32_t fence_type;
+        uint32_t fence_class;
+	drm_fence_object_t *fence;
+        uint32_t priv_flags;
+	wait_queue_head_t event_queue;
+        struct mutex mutex;
+} drm_buffer_object_t;
+
+#define _DRM_BO_FLAG_UNFENCED 0x00000001
+#define _DRM_BO_FLAG_EVICTED  0x00000002
+
+
 static __inline__ int drm_core_check_feature(struct drm_device *dev,
 					     int feature)
 {
@@ -841,6 +1096,7 @@
 #define drm_core_has_MTRR(dev) (0)
 #endif
 
+
 /******************************************************************/
 /** \name Internal function definitions */
 /*@{*/
@@ -869,6 +1125,7 @@
 extern int drm_mmap(struct file *filp, struct vm_area_struct *vma);
 extern unsigned long drm_core_get_map_ofs(drm_map_t * map);
 extern unsigned long drm_core_get_reg_ofs(struct drm_device *dev);
+extern pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma);
 
 				/* Memory management support (drm_memory.h) */
 #include "drm_memory.h"
@@ -884,6 +1141,14 @@
 extern int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start);
 extern int drm_unbind_agp(DRM_AGP_MEM * handle);
 
+extern void drm_free_memctl(size_t size);
+extern int drm_alloc_memctl(size_t size);
+extern void drm_query_memctl(drm_u64_t *cur_used,
+			     drm_u64_t *low_threshold,
+			     drm_u64_t *high_threshold); 
+extern void drm_init_memctl(size_t low_threshold,
+			    size_t high_threshold);
+
 				/* Misc. IOCTL support (drm_ioctl.h) */
 extern int drm_irq_by_busid(struct inode *inode, struct file *filp,
 			    unsigned int cmd, unsigned long arg);
@@ -951,6 +1216,13 @@
 extern int drm_lock_take(__volatile__ unsigned int *lock, unsigned int context);
 extern int drm_lock_free(drm_device_t * dev,
 			 __volatile__ unsigned int *lock, unsigned int context);
+/*
+ * These are exported to drivers so that they can implement fencing using
+ * DMA quiescent + idle. DMA quiescent usually requires the hardware lock.
+ */
+
+extern int drm_i_have_hw_lock(struct file *filp);
+extern int drm_kernel_take_hw_lock(struct file *filp);
 
 				/* Buffer management support (drm_bufs.h) */
 extern int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request);
@@ -1036,7 +1308,8 @@
 extern int drm_agp_free_memory(DRM_AGP_MEM * handle);
 extern int drm_agp_bind_memory(DRM_AGP_MEM * handle, off_t start);
 extern int drm_agp_unbind_memory(DRM_AGP_MEM * handle);
-
+extern drm_ttm_backend_t *drm_agp_init_ttm(struct drm_device *dev,
+					   drm_ttm_backend_t *backend);
 				/* Stub support (drm_stub.h) */
 extern int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
 		     struct drm_driver *driver);
@@ -1045,6 +1318,7 @@
 extern unsigned int drm_debug; /* 1 to enable debug output */
 extern unsigned int drm_cards_limit;
 extern drm_head_t **drm_heads;
+extern drm_cache_t drm_cache;
 extern struct drm_sysfs_class *drm_class;
 extern struct proc_dir_entry *drm_proc_root;
 
@@ -1088,11 +1362,121 @@
 
 extern drm_mm_node_t * drm_mm_get_block(drm_mm_node_t * parent, unsigned long size,
 					       unsigned alignment);
-extern void drm_mm_put_block(drm_mm_t *mm, drm_mm_node_t *cur);
+extern void drm_mm_put_block(drm_mm_node_t *cur);
 extern drm_mm_node_t *drm_mm_search_free(const drm_mm_t *mm, unsigned long size, 
 						unsigned alignment, int best_match);
 extern int drm_mm_init(drm_mm_t *mm, unsigned long start, unsigned long size);
 extern void drm_mm_takedown(drm_mm_t *mm);
+extern int drm_mm_clean(drm_mm_t *mm);
+extern unsigned long drm_mm_tail_space(drm_mm_t *mm);
+extern int drm_mm_remove_space_from_tail(drm_mm_t *mm, unsigned long size);
+extern int drm_mm_add_space_to_tail(drm_mm_t *mm, unsigned long size);
+
+static inline drm_mm_t *drm_get_mm(drm_mm_node_t *block)
+{
+	return block->mm;
+}
+  
+
+/*
+ * User space object bookkeeping (drm_object.c)
+ */
+
+/*
+ * Must be called with the struct_mutex held.
+ */
+
+extern int drm_add_user_object(drm_file_t *priv, drm_user_object_t *item,
+			       int shareable);
+
+/*
+ * Must be called with the struct_mutex held.
+ */
+
+extern drm_user_object_t *drm_lookup_user_object(drm_file_t *priv, uint32_t key);
+
+/*
+ * Must be called with the struct_mutex held.
+ * If "item" has been obtained by a call to drm_lookup_user_object. You may not
+ * release the struct_mutex before calling drm_remove_ref_object.
+ * This function may temporarily release the struct_mutex.
+ */
+
+extern int drm_remove_user_object(drm_file_t *priv, drm_user_object_t *item);
+
+/*
+ * Must be called with the struct_mutex held. May temporarily release it.
+ */
+
+extern int drm_add_ref_object(drm_file_t *priv, drm_user_object_t *referenced_object,
+			      drm_ref_t ref_action);
+
+/*
+ * Must be called with the struct_mutex held.
+ */
+
+drm_ref_object_t *drm_lookup_ref_object(drm_file_t *priv, 
+					drm_user_object_t *referenced_object,
+					drm_ref_t ref_action);
+/*
+ * Must be called with the struct_mutex held.
+ * If "item" has been obtained by a call to drm_lookup_ref_object. You may not
+ * release the struct_mutex before calling drm_remove_ref_object.
+ * This function may temporarily release the struct_mutex.
+ */
+
+extern void drm_remove_ref_object(drm_file_t *priv, drm_ref_object_t *item);
+extern int drm_user_object_ref(drm_file_t *priv, uint32_t user_token, drm_object_type_t type,
+			       drm_user_object_t **object);
+extern int drm_user_object_unref(drm_file_t *priv, uint32_t user_token, drm_object_type_t type);
+
+
+
+/*
+ * fence objects (drm_fence.c)
+ */
+
+extern void drm_fence_handler(drm_device_t *dev, uint32_t breadcrumb, uint32_t type);
+extern void drm_fence_manager_init(drm_device_t *dev);
+extern void drm_fence_manager_takedown(drm_device_t *dev);
+extern void drm_fence_flush_old(drm_device_t *dev, uint32_t sequence);
+extern int drm_fence_object_flush(drm_device_t * dev,
+				  volatile drm_fence_object_t * fence, 
+				  uint32_t type);
+extern int drm_fence_object_signaled(volatile drm_fence_object_t * fence, 
+				     uint32_t type);
+extern void drm_fence_usage_deref_locked(drm_device_t * dev,
+					 drm_fence_object_t * fence);
+extern void drm_fence_usage_deref_unlocked(drm_device_t * dev,
+					 drm_fence_object_t * fence);
+extern int drm_fence_object_wait(drm_device_t * dev, 
+				 volatile drm_fence_object_t * fence,
+				 int lazy, int ignore_signals, uint32_t mask);
+extern int drm_fence_object_create(drm_device_t *dev, uint32_t type,
+				   uint32_t fence_flags, 
+				   drm_fence_object_t **c_fence);
+extern int drm_fence_add_user_object(drm_file_t *priv, 
+				     drm_fence_object_t *fence,
+				     int shareable);
+
+
+
+
+
+extern int drm_fence_ioctl(DRM_IOCTL_ARGS);
+
+/*
+ * buffer objects (drm_bo.c)
+ */
+
+extern int drm_bo_ioctl(DRM_IOCTL_ARGS);
+extern int drm_mm_init_ioctl(DRM_IOCTL_ARGS);
+extern int drm_bo_driver_finish(drm_device_t *dev);
+extern int drm_bo_driver_init(drm_device_t *dev);
+extern int drm_fence_buffer_objects(drm_file_t * priv,
+				    struct list_head *list, 
+				    uint32_t fence_flags,
+				    drm_fence_object_t *fence,
+				    drm_fence_object_t **used_fence);
 
 
 /* Inline replacements for DRM_IOREMAP macros */
@@ -1164,6 +1548,58 @@
 extern void drm_free(void *pt, size_t size, int area);
 #endif
 
+/*
+ * Accounting variants of standard calls.
+ */
+
+static inline void *drm_ctl_alloc(size_t size, int area)
+{
+	void *ret;
+	if (drm_alloc_memctl(size))
+		return NULL;
+	ret = drm_alloc(size, area);
+	if (!ret)
+		drm_free_memctl(size);
+	return ret;
+}
+
+static inline void *drm_ctl_calloc(size_t nmemb, size_t size, int area)
+{
+	void *ret;
+
+	if (drm_alloc_memctl(nmemb*size))
+		return NULL;
+	ret = drm_calloc(nmemb, size, area);
+	if (!ret)
+		drm_free_memctl(nmemb*size);
+	return ret;
+}
+
+static inline void drm_ctl_free(void *pt, size_t size, int area)
+{
+	drm_free(pt, size, area);
+	drm_free_memctl(size);
+}
+
+static inline void *drm_ctl_cache_alloc(kmem_cache_t *cache, size_t size, 
+					int flags)
+{
+	void *ret;
+	if (drm_alloc_memctl(size))
+		return NULL;
+	ret = kmem_cache_alloc(cache, flags);
+	if (!ret)
+		drm_free_memctl(size);
+	return ret;
+}
+
+static inline void drm_ctl_cache_free(kmem_cache_t *cache, size_t size,
+				      void *obj)
+{
+	kmem_cache_free(cache, obj);
+	drm_free_memctl(size);
+}
+
 /*@}*/
 
 #endif				/* __KERNEL__ */
diff --git a/linux-core/drm_agpsupport.c b/linux-core/drm_agpsupport.c
index dce27cd..a5f1f9e 100644
--- a/linux-core/drm_agpsupport.c
+++ b/linux-core/drm_agpsupport.c
@@ -552,4 +552,162 @@
 	return agp_unbind_memory(handle);
 }
 
+
+
+/*
+ * AGP ttm backend interface.
+ */
+
+#ifndef AGP_USER_TYPES
+#define AGP_USER_TYPES (1 << 16)
+#define AGP_USER_MEMORY (AGP_USER_TYPES)
+#define AGP_USER_CACHED_MEMORY (AGP_USER_TYPES + 1)
+#endif
+
+static int drm_agp_needs_unbind_cache_adjust(drm_ttm_backend_t *backend) {
+	return ((backend->flags & DRM_BE_FLAG_BOUND_CACHED) ? 0 : 1);
+}
+
+
+static int drm_agp_populate(drm_ttm_backend_t *backend, unsigned long num_pages, 
+			    struct page **pages) {
+
+	drm_agp_ttm_priv *agp_priv = (drm_agp_ttm_priv *) backend->private;
+	struct page **cur_page, **last_page = pages + num_pages;
+	DRM_AGP_MEM *mem;
+
+	if (drm_alloc_memctl(num_pages * sizeof(void *)))
+		return -1;
+
+	DRM_DEBUG("drm_agp_populate_ttm\n");
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11)
+	mem = drm_agp_allocate_memory(num_pages, agp_priv->alloc_type);
+#else
+	mem = drm_agp_allocate_memory(agp_priv->bridge, num_pages, agp_priv->alloc_type);
+#endif
+	if (!mem) {
+		drm_free_memctl(num_pages *sizeof(void *));
+		return -1;
+	}
+
+	DRM_DEBUG("Current page count is %ld\n", (long) mem->page_count);
+	mem->page_count = 0;
+	for (cur_page = pages; cur_page < last_page; ++cur_page) {
+		mem->memory[mem->page_count++] = phys_to_gart(page_to_phys(*cur_page));
+	}
+	agp_priv->mem = mem;
+	return 0;
+}
+
+static int drm_agp_bind_ttm(drm_ttm_backend_t *backend, 
+			    unsigned long offset,
+			    int cached) 
+{
+	drm_agp_ttm_priv *agp_priv = (drm_agp_ttm_priv *) backend->private;
+	DRM_AGP_MEM *mem = agp_priv->mem;
+	int ret;
+
+	DRM_DEBUG("drm_agp_bind_ttm\n");
+	DRM_MASK_VAL(backend->flags, DRM_BE_FLAG_BOUND_CACHED,
+		     (cached) ? DRM_BE_FLAG_BOUND_CACHED : 0);
+	mem->is_flushed = TRUE;
+	mem->type = (cached) ? agp_priv->cached_type : agp_priv->uncached_type;
+	ret = drm_agp_bind_memory(mem, offset);
+	if (ret) {
+		DRM_ERROR("AGP Bind memory failed\n");
+	}
+	return ret;
+}
+
+static int drm_agp_unbind_ttm(drm_ttm_backend_t *backend) {
+
+	drm_agp_ttm_priv *agp_priv = (drm_agp_ttm_priv *) backend->private;
+
+	DRM_DEBUG("drm_agp_unbind_ttm\n");
+	if (agp_priv->mem->is_bound)
+		return drm_agp_unbind_memory(agp_priv->mem);
+	else
+		return 0;
+}
+
+static void drm_agp_clear_ttm(drm_ttm_backend_t *backend) {
+
+	drm_agp_ttm_priv *agp_priv = (drm_agp_ttm_priv *) backend->private;
+	DRM_AGP_MEM *mem = agp_priv->mem;
+
+	DRM_DEBUG("drm_agp_clear_ttm\n");
+	if (mem) {
+		unsigned long num_pages = mem->page_count;
+		backend->unbind(backend);
+		agp_free_memory(mem);
+		drm_free_memctl(num_pages *sizeof(void *));
+	}
+
+	agp_priv->mem = NULL;
+}
+
+static void drm_agp_destroy_ttm(drm_ttm_backend_t *backend) {
+
+	drm_agp_ttm_priv *agp_priv; 
+	
+	if (backend) {
+		DRM_DEBUG("drm_agp_destroy_ttm\n");
+		agp_priv = (drm_agp_ttm_priv *) backend->private;
+		if (agp_priv) {
+			if (agp_priv->mem) {
+				backend->clear(backend);
+			}
+			drm_ctl_free(agp_priv, sizeof(*agp_priv), DRM_MEM_MAPPINGS);
+			backend->private = NULL;
+		}
+		if (backend->flags & DRM_BE_FLAG_NEEDS_FREE) {
+			drm_ctl_free(backend, sizeof(*backend), DRM_MEM_MAPPINGS);                     
+		}
+	}
+}
+	
+
+drm_ttm_backend_t *drm_agp_init_ttm(struct drm_device *dev,
+				    drm_ttm_backend_t *backend)
+{
+
+        drm_ttm_backend_t *agp_be;
+	drm_agp_ttm_priv *agp_priv;
+
+	agp_be = (backend != NULL) ? backend:
+		drm_ctl_calloc(1, sizeof(*agp_be), DRM_MEM_MAPPINGS);
+
+	if (!agp_be)
+		return NULL;
+	
+	agp_priv = drm_ctl_calloc(1, sizeof(*agp_priv), DRM_MEM_MAPPINGS);
+	
+	if (!agp_priv) {
+		drm_ctl_free(agp_be, sizeof(*agp_be), DRM_MEM_MAPPINGS);
+		return NULL;
+	}
+	
+	agp_priv->mem = NULL;
+	agp_priv->alloc_type = AGP_USER_MEMORY;
+	agp_priv->cached_type = AGP_USER_CACHED_MEMORY;
+	agp_priv->uncached_type = AGP_USER_MEMORY;
+	agp_priv->bridge = dev->agp->bridge;
+	agp_priv->populated = FALSE;
+	agp_be->aperture_base = dev->agp->agp_info.aper_base;
+	agp_be->private = (void *) agp_priv;
+	agp_be->needs_ub_cache_adjust = drm_agp_needs_unbind_cache_adjust;
+	agp_be->populate = drm_agp_populate;
+	agp_be->clear = drm_agp_clear_ttm;
+	agp_be->bind = drm_agp_bind_ttm;
+	agp_be->unbind = drm_agp_unbind_ttm;
+	agp_be->destroy = drm_agp_destroy_ttm;
+	DRM_MASK_VAL(agp_be->flags, DRM_BE_FLAG_NEEDS_FREE,
+		     (backend == NULL) ? DRM_BE_FLAG_NEEDS_FREE : 0);
+	DRM_MASK_VAL(agp_be->flags, DRM_BE_FLAG_CBA,
+		     (dev->agp->cant_use_aperture) ? DRM_BE_FLAG_CBA : 0);
+	agp_be->drm_map_type = _DRM_AGP;
+	return agp_be;
+}
+EXPORT_SYMBOL(drm_agp_init_ttm);
+
 #endif				/* __OS_HAS_AGP */
diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c
new file mode 100644
index 0000000..65e24fb
--- /dev/null
+++ b/linux-core/drm_bo.c
@@ -0,0 +1,1998 @@
+/**************************************************************************
+ * 
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
+ * All Rights Reserved.
+ * 
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ * 
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ * 
+ * 
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#include "drmP.h"
+
+/*
+ * Buffer object locking policy:
+ * Lock dev->struct_mutex;
+ * Increase usage
+ * Unlock dev->struct_mutex;
+ * Lock buffer->mutex;
+ * Do whatever you want;
+ * Unlock buffer->mutex;
+ * Decrease usage. Call destruction if zero.
+ *
+ * User object visibility ups usage just once, since it has its own 
+ * refcounting.
+ *
+ * Destruction:
+ * lock dev->struct_mutex;
+ * Verify that usage is zero. Otherwise unlock and continue.
+ * Destroy object.
+ * unlock dev->struct_mutex;
+ *
+ * Mutex and spinlock locking orders:
+ * 1.) Buffer mutex
+ * 2.) Refer to ttm locking orders.
+ */
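+
+/*
+ * Illustrative sketch of the policy above (hypothetical caller, not a
+ * function in this file):
+ *
+ *	mutex_lock(&dev->struct_mutex);
+ *	atomic_inc(&bo->usage);
+ *	mutex_unlock(&dev->struct_mutex);
+ *	mutex_lock(&bo->mutex);
+ *	... operate on the buffer ...
+ *	mutex_unlock(&bo->mutex);
+ *	drm_bo_usage_deref_unlocked(dev, bo);
+ */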
+
+#define DRM_FLAG_MASKED(_old, _new, _mask) {		\
+	(_old) ^= (((_old) ^ (_new)) & (_mask));	\
+}
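+
+/*
+ * DRM_FLAG_MASKED() copies the bits selected by _mask from _new into _old,
+ * leaving all other bits of _old untouched. For example (illustrative
+ * values only): _old = 0x05, _new = 0x03, _mask = 0x06 yields _old = 0x03.
+ */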
+
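+/*
+ * Map a DRM_BO_MEM_* memory type index to its corresponding
+ * DRM_BO_FLAG_MEM_* bit; the memory-type flag bits start at bit 24
+ * of the flag word.
+ */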
+static inline uint32_t drm_bo_type_flags(unsigned type)
+{
+	return (1 << (24 + type));
+}
+
+static inline drm_buffer_object_t *drm_bo_entry(struct list_head *list,
+						unsigned type)
+{
+	switch (type) {
+	case DRM_BO_MEM_LOCAL:
+	case DRM_BO_MEM_TT:
+		return list_entry(list, drm_buffer_object_t, lru_ttm);
+	case DRM_BO_MEM_VRAM:
+	case DRM_BO_MEM_VRAM_NM:
+		return list_entry(list, drm_buffer_object_t, lru_card);
+	default:
+		BUG_ON(1);
+	}
+	return NULL;
+}
+
+static inline drm_mm_node_t *drm_bo_mm_node(drm_buffer_object_t * bo,
+					    unsigned type)
+{
+	switch (type) {
+	case DRM_BO_MEM_LOCAL:
+	case DRM_BO_MEM_TT:
+		return bo->node_ttm;
+	case DRM_BO_MEM_VRAM:
+	case DRM_BO_MEM_VRAM_NM:
+		return bo->node_card;
+	default:
+		BUG_ON(1);
+	}
+	return NULL;
+}
+
+/*
+ * bo locked. dev->struct_mutex locked.
+ */
+
+static void drm_bo_add_to_lru(drm_buffer_object_t * buf,
+			      drm_buffer_manager_t * bm)
+{
+	struct list_head *list;
+	unsigned mem_type;
+
+	if (buf->flags & DRM_BO_FLAG_MEM_TT) {
+		mem_type = DRM_BO_MEM_TT;
+		list =
+		    (buf->
+		     flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) ?
+		    &bm->pinned[mem_type] : &bm->lru[mem_type];
+		list_add_tail(&buf->lru_ttm, list);
+	} else {
+		mem_type = DRM_BO_MEM_LOCAL;
+		list =
+		    (buf->
+		     flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) ?
+		    &bm->pinned[mem_type] : &bm->lru[mem_type];
+		list_add_tail(&buf->lru_ttm, list);
+	}
+	if (buf->flags & DRM_BO_FLAG_MEM_VRAM) {
+		mem_type = DRM_BO_MEM_VRAM;
+		list =
+		    (buf->
+		     flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) ?
+		    &bm->pinned[mem_type] : &bm->lru[mem_type];
+		list_add_tail(&buf->lru_card, list);
+	}
+}
+
+/*
+ * bo locked.
+ */
+
+static int drm_move_tt_to_local(drm_buffer_object_t * buf, int evict,
+				int force_no_move)
+{
+	drm_device_t *dev = buf->dev;
+	int ret;
+
+	if (buf->node_ttm) {
+		mutex_lock(&dev->struct_mutex);
+		if (evict)
+			ret = drm_evict_ttm(buf->ttm);
+		else
+			ret = drm_unbind_ttm(buf->ttm);
+
+		if (ret) {
+			mutex_unlock(&dev->struct_mutex);
+			if (ret == -EAGAIN)
+				schedule();
+			return ret;
+		}
+
+		if (!(buf->flags & DRM_BO_FLAG_NO_MOVE) || force_no_move) {
+			drm_mm_put_block(buf->node_ttm);
+			buf->node_ttm = NULL;
+		}
+		mutex_unlock(&dev->struct_mutex);
+	}
+
+	buf->flags &= ~DRM_BO_FLAG_MEM_TT;
+	buf->flags |= DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED;
+
+	return 0;
+}
+
+/*
+ * Lock dev->struct_mutex
+ */
+
+static void drm_bo_destroy_locked(drm_device_t * dev, drm_buffer_object_t * bo)
+{
+
+	drm_buffer_manager_t *bm = &dev->bm;
+
+	DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
+
+	/*
+	 * Someone might try to access us through the still active BM lists.
+	 */
+
+	if (atomic_read(&bo->usage) != 0)
+		return;
+	if (!list_empty(&bo->ddestroy))
+		return;
+
+	if (bo->fence) {
+		if (!drm_fence_object_signaled(bo->fence, bo->fence_type)) {
+
+			drm_fence_object_flush(dev, bo->fence, bo->fence_type);
+			list_add_tail(&bo->ddestroy, &bm->ddestroy);
+			schedule_delayed_work(&bm->wq,
+					      ((DRM_HZ / 100) <
+					       1) ? 1 : DRM_HZ / 100);
+			return;
+		} else {
+			drm_fence_usage_deref_locked(dev, bo->fence);
+			bo->fence = NULL;
+		}
+	}
+	/*
+	 * Take away from lru lists.
+	 */
+
+	list_del_init(&bo->lru_ttm);
+	list_del_init(&bo->lru_card);
+
+	if (bo->ttm) {
+		unsigned long _end = jiffies + DRM_HZ;
+		int ret;
+
+		/*
+		 * This temporarily unlocks struct_mutex. 
+		 */
+
+		do {
+			ret = drm_unbind_ttm(bo->ttm);
+			if (ret == -EAGAIN) {
+				mutex_unlock(&dev->struct_mutex);
+				schedule();
+				mutex_lock(&dev->struct_mutex);
+			}
+		} while (ret == -EAGAIN && !time_after_eq(jiffies, _end));
+
+		if (ret) {
+			DRM_ERROR("Couldn't unbind buffer. "
+				  "Bad. Continuing anyway\n");
+		}
+	}
+
+	if (bo->node_ttm) {
+		drm_mm_put_block(bo->node_ttm);
+		bo->node_ttm = NULL;
+	}
+	if (bo->node_card) {
+		drm_mm_put_block(bo->node_card);
+		bo->node_card = NULL;
+	}
+	if (bo->ttm_object) {
+		drm_ttm_object_deref_locked(dev, bo->ttm_object);
+	}
+	atomic_dec(&bm->count);
+	drm_ctl_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ);
+}
+
+/*
+ * Call bo->mutex locked.
+ * Wait until the buffer is idle.
+ */
+
+static int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals,
+		       int no_wait)
+{
+
+	drm_fence_object_t *fence = bo->fence;
+	int ret;
+
+	if (fence) {
+		drm_device_t *dev = bo->dev;
+		if (drm_fence_object_signaled(fence, bo->fence_type)) {
+			drm_fence_usage_deref_unlocked(dev, fence);
+			bo->fence = NULL;
+			return 0;
+		}
+		if (no_wait) {
+			return -EBUSY;
+		}
+		ret =
+		    drm_fence_object_wait(dev, fence, lazy, ignore_signals,
+					  bo->fence_type);
+		if (ret)
+			return ret;
+
+		drm_fence_usage_deref_unlocked(dev, fence);
+		bo->fence = NULL;
+
+	}
+	return 0;
+}
+
+/*
+ * Call dev->struct_mutex locked.
+ */
+
+static void drm_bo_delayed_delete(drm_device_t * dev, int remove_all)
+{
+	drm_buffer_manager_t *bm = &dev->bm;
+
+	drm_buffer_object_t *entry, *nentry;
+	struct list_head *list, *next;
+	drm_fence_object_t *fence;
+
+	list_for_each_safe(list, next, &bm->ddestroy) {
+		entry = list_entry(list, drm_buffer_object_t, ddestroy);
+		atomic_inc(&entry->usage);
+		if (atomic_read(&entry->usage) != 1) {
+			atomic_dec(&entry->usage);
+			continue;
+		}
+
+		nentry = NULL;
+		if (next != &bm->ddestroy) {
+			nentry = list_entry(next, drm_buffer_object_t,
+					    ddestroy);
+			atomic_inc(&nentry->usage);
+		}
+
+		mutex_unlock(&dev->struct_mutex);
+		mutex_lock(&entry->mutex);
+		fence = entry->fence;
+		if (fence && drm_fence_object_signaled(fence,
+						       entry->fence_type)) {
+			drm_fence_usage_deref_locked(dev, fence);
+			entry->fence = NULL;
+		}
+
+		if (entry->fence && remove_all) {
+			if (bm->nice_mode) {
+				unsigned long _end = jiffies + 3 * DRM_HZ;
+				int ret;
+				do {
+					ret = drm_bo_wait(entry, 0, 1, 0);
+				} while (ret && !time_after_eq(jiffies, _end));
+
+				if (entry->fence) {
+					bm->nice_mode = 0;
+					DRM_ERROR("Detected GPU lockup or "
+						  "fence driver was taken down. "
+						  "Evicting waiting buffers.\n");
+				}
+			}
+			if (entry->fence) {
+				drm_fence_usage_deref_unlocked(dev,
+							       entry->fence);
+				entry->fence = NULL;
+			}
+		}
+		mutex_lock(&dev->struct_mutex);
+		mutex_unlock(&entry->mutex);
+		if (atomic_dec_and_test(&entry->usage) && (!entry->fence)) {
+			list_del_init(&entry->ddestroy);
+			drm_bo_destroy_locked(dev, entry);
+		}
+		if (nentry) {
+			atomic_dec(&nentry->usage);
+		}
+	}
+
+}
+
+static void drm_bo_delayed_workqueue(void *data)
+{
+	drm_device_t *dev = (drm_device_t *) data;
+	drm_buffer_manager_t *bm = &dev->bm;
+
+	DRM_DEBUG("Delayed delete Worker\n");
+
+	mutex_lock(&dev->struct_mutex);
+	if (!bm->initialized) {
+		mutex_unlock(&dev->struct_mutex);
+		return;
+	}
+	drm_bo_delayed_delete(dev, 0);
+	if (bm->initialized && !list_empty(&bm->ddestroy)) {
+		schedule_delayed_work(&bm->wq,
+				      ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
+	}
+	mutex_unlock(&dev->struct_mutex);
+}
+
+void drm_bo_usage_deref_locked(drm_device_t * dev, drm_buffer_object_t * bo)
+{
+	if (atomic_dec_and_test(&bo->usage)) {
+		drm_bo_destroy_locked(dev, bo);
+	}
+}
+
+static void drm_bo_base_deref_locked(drm_file_t * priv, drm_user_object_t * uo)
+{
+	drm_bo_usage_deref_locked(priv->head->dev,
+				  drm_user_object_entry(uo, drm_buffer_object_t,
+							base));
+}
+
+void drm_bo_usage_deref_unlocked(drm_device_t * dev, drm_buffer_object_t * bo)
+{
+	if (atomic_dec_and_test(&bo->usage)) {
+		mutex_lock(&dev->struct_mutex);
+		if (atomic_read(&bo->usage) == 0)
+			drm_bo_destroy_locked(dev, bo);
+		mutex_unlock(&dev->struct_mutex);
+	}
+}
+
+/*
+ * Note. The caller has to register (if applicable) 
+ * and deregister fence object usage.
+ */
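+
+/*
+ * Hypothetical usage sketch (illustrative only): a driver that has just
+ * validated and submitted commands for the buffers on the unfenced list
+ * could let this function create and emit the fence, and then drop its
+ * own fence reference once it no longer needs it:
+ *
+ *	drm_fence_object_t *fence = NULL;
+ *	ret = drm_fence_buffer_objects(priv, NULL, 0, NULL, &fence);
+ *	if (!ret)
+ *		drm_fence_usage_deref_unlocked(dev, fence);
+ */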
+
+int drm_fence_buffer_objects(drm_file_t * priv,
+			     struct list_head *list,
+			     uint32_t fence_flags,
+			     drm_fence_object_t * fence,
+			     drm_fence_object_t ** used_fence)
+{
+	drm_device_t *dev = priv->head->dev;
+	drm_buffer_manager_t *bm = &dev->bm;
+
+	drm_buffer_object_t *entry;
+	uint32_t fence_type = 0;
+	int count = 0;
+	int ret = 0;
+	struct list_head f_list, *l;
+
+	mutex_lock(&dev->struct_mutex);
+
+	if (!list)
+		list = &bm->unfenced;
+
+	list_for_each_entry(entry, list, lru_ttm) {
+		BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
+		fence_type |= entry->fence_type;
+		if (entry->fence_class != 0) {
+			DRM_ERROR("Fence class %d is not implemented yet.\n",
+				  entry->fence_class);
+			ret = -EINVAL;
+			goto out;
+		}
+		count++;
+	}
+
+	if (!count) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/*
+	 * Transfer to a local list before we release the dev->struct_mutex,
+	 * so that we don't get any new unfenced objects while fencing
+	 * the ones we already have.
+	 */
+
+	list_add_tail(&f_list, list);
+	list_del_init(list);
+
+	if (fence) {
+		if ((fence_type & fence->type) != fence_type) {
+			DRM_ERROR("Given fence doesn't match buffers "
+				  "on unfenced list.\n");
+			ret = -EINVAL;
+			goto out;
+		}
+	} else {
+		mutex_unlock(&dev->struct_mutex);
+		ret = drm_fence_object_create(dev, fence_type,
+					      fence_flags | DRM_FENCE_FLAG_EMIT,
+					      &fence);
+		mutex_lock(&dev->struct_mutex);
+		if (ret)
+			goto out;
+	}
+
+	count = 0;
+	l = f_list.next;
+	while (l != &f_list) {
+		entry = list_entry(l, drm_buffer_object_t, lru_ttm);
+		atomic_inc(&entry->usage);
+		mutex_unlock(&dev->struct_mutex);
+		mutex_lock(&entry->mutex);
+		mutex_lock(&dev->struct_mutex);
+		list_del_init(l);
+		list_del_init(&entry->lru_card);
+		if (entry->priv_flags & _DRM_BO_FLAG_UNFENCED) {
+			count++;
+			if (entry->fence)
+				drm_fence_usage_deref_locked(dev, entry->fence);
+			entry->fence = fence;
+			DRM_FLAG_MASKED(entry->priv_flags, 0,
+					_DRM_BO_FLAG_UNFENCED);
+			DRM_WAKEUP(&entry->event_queue);
+			drm_bo_add_to_lru(entry, bm);
+		}
+		mutex_unlock(&entry->mutex);
+		drm_bo_usage_deref_locked(dev, entry);
+		l = f_list.next;
+	}
+	atomic_add(count, &fence->usage);
+	DRM_DEBUG("Fenced %d buffers\n", count);
+      out:
+	mutex_unlock(&dev->struct_mutex);
+	*used_fence = fence;
+	return ret;
+}
+
+EXPORT_SYMBOL(drm_fence_buffer_objects);
+
+/*
+ * bo->mutex locked 
+ */
+
+static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type,
+			int no_wait, int force_no_move)
+{
+	int ret = 0;
+	drm_device_t *dev = bo->dev;
+	drm_buffer_manager_t *bm = &dev->bm;
+
+	/*
+	 * Someone might have modified the buffer before we took the buffer mutex.
+	 */
+
+	if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED)
+		goto out;
+	if (!(bo->flags & drm_bo_type_flags(mem_type)))
+		goto out;
+
+	ret = drm_bo_wait(bo, 0, 0, no_wait);
+
+	if (ret) {
+		if (ret != -EAGAIN)
+			DRM_ERROR("Failed to expire fence before "
+				  "buffer eviction.\n");
+		goto out;
+	}
+
+	if (mem_type == DRM_BO_MEM_TT) {
+		ret = drm_move_tt_to_local(bo, 1, force_no_move);
+		if (ret)
+			goto out;
+		mutex_lock(&dev->struct_mutex);
+		list_del_init(&bo->lru_ttm);
+		drm_bo_add_to_lru(bo, bm);
+		mutex_unlock(&dev->struct_mutex);
+	}
+#if 0
+	else {
+		ret = drm_move_vram_to_local(bo);
+		mutex_lock(&dev->struct_mutex);
+		list_del_init(&bo->lru_card);
+		mutex_unlock(&dev->struct_mutex);
+	}
+#endif
+	if (ret)
+		goto out;
+
+	DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED,
+			_DRM_BO_FLAG_EVICTED);
+      out:
+	return ret;
+}
+
+/*
+ * buf->mutex locked.
+ */
+
+int drm_bo_alloc_space(drm_buffer_object_t * buf, unsigned mem_type,
+		       int no_wait)
+{
+	drm_device_t *dev = buf->dev;
+	drm_mm_node_t *node;
+	drm_buffer_manager_t *bm = &dev->bm;
+	drm_buffer_object_t *bo;
+	drm_mm_t *mm = &bm->manager[mem_type];
+	struct list_head *lru;
+	unsigned long size = buf->num_pages;
+	int ret;
+
+	mutex_lock(&dev->struct_mutex);
+	do {
+		node = drm_mm_search_free(mm, size, buf->page_alignment, 1);
+		if (node)
+			break;
+
+		lru = &bm->lru[mem_type];
+		if (lru->next == lru)
+			break;
+
+		bo = drm_bo_entry(lru->next, mem_type);
+
+		atomic_inc(&bo->usage);
+		mutex_unlock(&dev->struct_mutex);
+		mutex_lock(&bo->mutex);
+		BUG_ON(bo->flags & DRM_BO_FLAG_NO_MOVE);
+		ret = drm_bo_evict(bo, mem_type, no_wait, 0);
+		mutex_unlock(&bo->mutex);
+		drm_bo_usage_deref_unlocked(dev, bo);
+		if (ret)
+			return ret;
+		mutex_lock(&dev->struct_mutex);
+	} while (1);
+
+	if (!node) {
+		DRM_ERROR("Out of videoram / aperture space\n");
+		mutex_unlock(&dev->struct_mutex);
+		return -ENOMEM;
+	}
+
+	node = drm_mm_get_block(node, size, buf->page_alignment);
+	mutex_unlock(&dev->struct_mutex);
+	BUG_ON(!node);
+	node->private = (void *)buf;
+
+	if (mem_type == DRM_BO_MEM_TT) {
+		buf->node_ttm = node;
+	} else {
+		buf->node_card = node;
+	}
+	buf->offset = node->start * PAGE_SIZE;
+	return 0;
+}
+
+static int drm_move_local_to_tt(drm_buffer_object_t * bo, int no_wait)
+{
+	drm_device_t *dev = bo->dev;
+	drm_ttm_backend_t *be;
+	int ret;
+
+	if (!(bo->node_ttm && (bo->flags & DRM_BO_FLAG_NO_MOVE))) {
+		BUG_ON(bo->node_ttm);
+		ret = drm_bo_alloc_space(bo, DRM_BO_MEM_TT, no_wait);
+		if (ret)
+			return ret;
+	}
+
+	DRM_DEBUG("Flipping in to AGP 0x%08lx\n", bo->node_ttm->start);
+
+	mutex_lock(&dev->struct_mutex);
+	ret = drm_bind_ttm(bo->ttm, bo->flags & DRM_BO_FLAG_BIND_CACHED,
+			   bo->node_ttm->start);
+	if (ret) {
+		drm_mm_put_block(bo->node_ttm);
+		bo->node_ttm = NULL;
+	}
+	mutex_unlock(&dev->struct_mutex);
+
+	if (ret) {
+		return ret;
+	}
+
+	be = bo->ttm->be;
+	if (be->needs_ub_cache_adjust(be))
+		bo->flags &= ~DRM_BO_FLAG_CACHED;
+	bo->flags &= ~DRM_BO_MASK_MEM;
+	bo->flags |= DRM_BO_FLAG_MEM_TT;
+
+	if (bo->priv_flags & _DRM_BO_FLAG_EVICTED) {
+		ret = dev->driver->bo_driver->invalidate_caches(dev, bo->flags);
+		if (ret)
+			DRM_ERROR("Could not flush read caches\n");
+	}
+	DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_EVICTED);
+
+	return 0;
+}
+
+static int drm_bo_new_flags(drm_device_t * dev,
+			    uint32_t flags, uint32_t new_mask, uint32_t hint,
+			    int init, uint32_t * n_flags, uint32_t * n_mask)
+{
+	uint32_t new_flags = 0;
+	uint32_t new_props;
+	drm_bo_driver_t *driver = dev->driver->bo_driver;
+	drm_buffer_manager_t *bm = &dev->bm;
+	unsigned i;
+
+	/*
+	 * First adjust the mask to take away nonexistent memory types.
+	 */
+
+	for (i = 0; i < DRM_BO_MEM_TYPES; ++i) {
+		if (!bm->use_type[i])
+			new_mask &= ~drm_bo_type_flags(i);
+	}
+
+	if ((new_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) {
+		DRM_ERROR
+		    ("DRM_BO_FLAG_NO_EVICT is only available to privileged "
+		     "processes\n");
+		return -EPERM;
+	}
+	if (new_mask & DRM_BO_FLAG_BIND_CACHED) {
+		if (((new_mask & DRM_BO_FLAG_MEM_TT) &&
+		     !driver->cached[DRM_BO_MEM_TT]) &&
+		    ((new_mask & DRM_BO_FLAG_MEM_VRAM)
+		     && !driver->cached[DRM_BO_MEM_VRAM])) {
+			new_mask &= ~DRM_BO_FLAG_BIND_CACHED;
+		} else {
+			if (!driver->cached[DRM_BO_MEM_TT])
+				new_flags &= DRM_BO_FLAG_MEM_TT;
+			if (!driver->cached[DRM_BO_MEM_VRAM])
+				new_flags &= DRM_BO_FLAG_MEM_VRAM;
+		}
+	}
+
+	if ((new_mask & DRM_BO_FLAG_READ_CACHED) &&
+	    !(new_mask & DRM_BO_FLAG_BIND_CACHED)) {
+		if ((new_mask & DRM_BO_FLAG_NO_EVICT) &&
+		    !(new_mask & DRM_BO_FLAG_MEM_LOCAL)) {
+			DRM_ERROR
+			    ("Cannot read cached from a pinned VRAM / TT buffer\n");
+			return -EINVAL;
+		}
+	}
+
+	/*
+	 * Determine new memory location:
+	 */
+
+	if (!(flags & new_mask & DRM_BO_MASK_MEM) || init) {
+
+		new_flags = new_mask & DRM_BO_MASK_MEM;
+
+		if (!new_flags) {
+			DRM_ERROR("Invalid buffer object memory flags\n");
+			return -EINVAL;
+		}
+
+		if (new_flags & DRM_BO_FLAG_MEM_LOCAL) {
+			if ((hint & DRM_BO_HINT_AVOID_LOCAL) &&
+			    new_flags & (DRM_BO_FLAG_MEM_VRAM |
+					 DRM_BO_FLAG_MEM_TT)) {
+				new_flags &= ~DRM_BO_FLAG_MEM_LOCAL;
+			} else {
+				new_flags = DRM_BO_FLAG_MEM_LOCAL;
+			}
+		}
+		if (new_flags & DRM_BO_FLAG_MEM_TT) {
+			if ((new_mask & DRM_BO_FLAG_PREFER_VRAM) &&
+			    new_flags & DRM_BO_FLAG_MEM_VRAM) {
+				new_flags = DRM_BO_FLAG_MEM_VRAM;
+			} else {
+				new_flags = DRM_BO_FLAG_MEM_TT;
+			}
+		}
+	} else {
+		new_flags = flags & DRM_BO_MASK_MEM;
+	}
+
+	new_props = new_mask & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE |
+				DRM_BO_FLAG_READ);
+
+	if (!new_props) {
+		DRM_ERROR("Invalid buffer object rwx properties\n");
+		return -EINVAL;
+	}
+
+	new_flags |= new_mask & ~DRM_BO_MASK_MEM;
+
+	if (((flags ^ new_flags) & DRM_BO_FLAG_BIND_CACHED) &&
+	    (new_flags & DRM_BO_FLAG_NO_EVICT) &&
+	    (flags & (DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_MEM_VRAM))) {
+		if (!(flags & DRM_BO_FLAG_CACHED)) {
+			DRM_ERROR
+			    ("Cannot change caching policy of pinned buffer\n");
+			return -EINVAL;
+		} else {
+			new_flags &= ~DRM_BO_FLAG_CACHED;
+		}
+	}
+
+	*n_flags = new_flags;
+	*n_mask = new_mask;
+	return 0;
+}
+
+/*
+ * Call dev->struct_mutex locked.
+ */
+
+drm_buffer_object_t *drm_lookup_buffer_object(drm_file_t * priv,
+					      uint32_t handle, int check_owner)
+{
+	drm_user_object_t *uo;
+	drm_buffer_object_t *bo;
+
+	uo = drm_lookup_user_object(priv, handle);
+
+	if (!uo || (uo->type != drm_buffer_type)) {
+		DRM_ERROR("Could not find buffer object 0x%08x\n", handle);
+		return NULL;
+	}
+
+	if (check_owner && priv != uo->owner) {
+		if (!drm_lookup_ref_object(priv, uo, _DRM_REF_USE))
+			return NULL;
+	}
+
+	bo = drm_user_object_entry(uo, drm_buffer_object_t, base);
+	atomic_inc(&bo->usage);
+	return bo;
+}
+
+/*
+ * Call bo->mutex locked.
+ * Returns 1 if the buffer is currently rendered to or from, 0 otherwise.
+ * Unlike drm_bo_busy(), this does not do any fence flushing.
+ */
+
+static int drm_bo_quick_busy(drm_buffer_object_t * bo)
+{
+	drm_fence_object_t *fence = bo->fence;
+
+	BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
+	if (fence) {
+		drm_device_t *dev = bo->dev;
+		if (drm_fence_object_signaled(fence, bo->fence_type)) {
+			drm_fence_usage_deref_unlocked(dev, fence);
+			bo->fence = NULL;
+			return 0;
+		}
+		return 1;
+	}
+	return 0;
+}
+
+/*
+ * Call bo->mutex locked.
+ * Returns 1 if the buffer is currently rendered to or from, 0 otherwise.
+ */
+
+static int drm_bo_busy(drm_buffer_object_t * bo)
+{
+	drm_fence_object_t *fence = bo->fence;
+
+	BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
+	if (fence) {
+		drm_device_t *dev = bo->dev;
+		if (drm_fence_object_signaled(fence, bo->fence_type)) {
+			drm_fence_usage_deref_unlocked(dev, fence);
+			bo->fence = NULL;
+			return 0;
+		}
+		drm_fence_object_flush(dev, fence, DRM_FENCE_TYPE_EXE);
+		if (drm_fence_object_signaled(fence, bo->fence_type)) {
+			drm_fence_usage_deref_unlocked(dev, fence);
+			bo->fence = NULL;
+			return 0;
+		}
+		return 1;
+	}
+	return 0;
+}
+
+static int drm_bo_read_cached(drm_buffer_object_t * bo)
+{
+	int ret = 0;
+
+	BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
+	if (bo->node_card)
+		ret = drm_bo_evict(bo, DRM_BO_MEM_VRAM, 1, 0);
+	if (ret)
+		return ret;
+	if (bo->node_ttm)
+		ret = drm_bo_evict(bo, DRM_BO_MEM_TT, 1, 0);
+	return ret;
+}
+
+/*
+ * Wait until a buffer is unmapped.
+ */
+
+static int drm_bo_wait_unmapped(drm_buffer_object_t * bo, int no_wait)
+{
+	int ret = 0;
+
+	if ((atomic_read(&bo->mapped) >= 0) && no_wait)
+		return -EBUSY;
+
+	DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
+		    atomic_read(&bo->mapped) == -1);
+
+	if (ret == -EINTR)
+		ret = -EAGAIN;
+
+	return ret;
+}
+
+static int drm_bo_check_unfenced(drm_buffer_object_t * bo)
+{
+	int ret;
+
+	mutex_lock(&bo->mutex);
+	ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
+	mutex_unlock(&bo->mutex);
+	return ret;
+}
+
+/*
+ * Wait until a buffer, scheduled to be fenced, moves off the unfenced list.
+ * Until then, we cannot really do anything with it except delete it.
+ * The unfenced list is a PITA, and the operations
+ * 1) validating
+ * 2) submitting commands
+ * 3) fencing
+ * should really be a single atomic operation.
+ * We currently "solve" this problem by keeping
+ * the buffer "unfenced" after validating, but before fencing.
+ */
+
+static int drm_bo_wait_unfenced(drm_buffer_object_t * bo, int no_wait,
+				int eagain_if_wait)
+{
+	int ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
+	unsigned long _end = jiffies + 3 * DRM_HZ;
+
+	if (ret && no_wait)
+		return -EBUSY;
+	else if (!ret)
+		return 0;
+
+	do {
+		mutex_unlock(&bo->mutex);
+		DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
+			    !drm_bo_check_unfenced(bo));
+		mutex_lock(&bo->mutex);
+		if (ret == -EINTR)
+			return -EAGAIN;
+		if (ret) {
+			DRM_ERROR
+			    ("Error waiting for buffer to become fenced\n");
+			return ret;
+		}
+		ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
+	} while (ret && !time_after_eq(jiffies, _end));
+	if (ret) {
+		DRM_ERROR("Timeout waiting for buffer to become fenced\n");
+		return ret;
+	}
+	if (eagain_if_wait)
+		return -EAGAIN;
+
+	return 0;
+}
+
+/*
+ * Fill in the ioctl reply argument with buffer info.
+ * Bo locked. 
+ */
+
+static void drm_bo_fill_rep_arg(drm_buffer_object_t * bo,
+				drm_bo_arg_reply_t * rep)
+{
+	rep->handle = bo->base.hash.key;
+	rep->flags = bo->flags;
+	rep->size = bo->num_pages * PAGE_SIZE;
+	rep->offset = bo->offset;
+
+	if (bo->ttm_object) {
+		rep->arg_handle = bo->ttm_object->map_list.user_token;
+	} else {
+		rep->arg_handle = 0;
+	}
+
+	rep->mask = bo->mask;
+	rep->buffer_start = bo->buffer_start;
+	rep->fence_flags = bo->fence_type;
+	rep->rep_flags = 0;
+	rep->page_alignment = bo->page_alignment;
+
+	if ((bo->priv_flags & _DRM_BO_FLAG_UNFENCED) || drm_bo_quick_busy(bo)) {
+		DRM_FLAG_MASKED(rep->rep_flags, DRM_BO_REP_BUSY,
+				DRM_BO_REP_BUSY);
+	}
+}
+
+/*
+ * Wait for buffer idle and register that we've mapped the buffer.
+ * Mapping is registered as a drm_ref_object with type _DRM_REF_TYPE1, 
+ * so that if the client dies, the mapping is automatically 
+ * unregistered.
+ */
+
+static int drm_buffer_object_map(drm_file_t * priv, uint32_t handle,
+				 uint32_t map_flags, unsigned hint,
+				 drm_bo_arg_reply_t * rep)
+{
+	drm_buffer_object_t *bo;
+	drm_device_t *dev = priv->head->dev;
+	int ret = 0;
+	int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
+
+	mutex_lock(&dev->struct_mutex);
+	bo = drm_lookup_buffer_object(priv, handle, 1);
+	mutex_unlock(&dev->struct_mutex);
+
+	if (!bo)
+		return -EINVAL;
+
+	mutex_lock(&bo->mutex);
+	if (!(hint & DRM_BO_HINT_ALLOW_UNFENCED_MAP)) {
+		ret = drm_bo_wait_unfenced(bo, no_wait, 0);
+		if (ret)
+			goto out;
+	}
+
+	/*
+	 * If this returns true, we are currently unmapped.
+	 * We need to do this test, because unmapping can
+	 * be done without the bo->mutex held.
+	 */
+
+	while (1) {
+		if (atomic_inc_and_test(&bo->mapped)) {
+			if (no_wait && drm_bo_busy(bo)) {
+				atomic_dec(&bo->mapped);
+				ret = -EBUSY;
+				goto out;
+			}
+			ret = drm_bo_wait(bo, 0, 0, no_wait);
+			if (ret) {
+				atomic_dec(&bo->mapped);
+				goto out;
+			}
+
+			if ((map_flags & DRM_BO_FLAG_READ) &&
+			    (bo->flags & DRM_BO_FLAG_READ_CACHED) &&
+			    (!(bo->flags & DRM_BO_FLAG_CACHED))) {
+				drm_bo_read_cached(bo);
+			}
+			break;
+		} else if ((map_flags & DRM_BO_FLAG_READ) &&
+			   (bo->flags & DRM_BO_FLAG_READ_CACHED) &&
+			   (!(bo->flags & DRM_BO_FLAG_CACHED))) {
+
+			/*
+			 * We are already mapped with different flags;
+			 * we need to wait for the buffer to be unmapped.
+			 */
+
+			ret = drm_bo_wait_unmapped(bo, no_wait);
+			if (ret)
+				goto out;
+
+			continue;
+		}
+		break;
+	}
+
+	mutex_lock(&dev->struct_mutex);
+	ret = drm_add_ref_object(priv, &bo->base, _DRM_REF_TYPE1);
+	mutex_unlock(&dev->struct_mutex);
+	if (ret) {
+		if (atomic_add_negative(-1, &bo->mapped))
+			DRM_WAKEUP(&bo->event_queue);
+
+	} else
+		drm_bo_fill_rep_arg(bo, rep);
+      out:
+	mutex_unlock(&bo->mutex);
+	drm_bo_usage_deref_unlocked(dev, bo);
+	return ret;
+}
+
+static int drm_buffer_object_unmap(drm_file_t * priv, uint32_t handle)
+{
+	drm_device_t *dev = priv->head->dev;
+	drm_buffer_object_t *bo;
+	drm_ref_object_t *ro;
+	int ret = 0;
+
+	mutex_lock(&dev->struct_mutex);
+
+	bo = drm_lookup_buffer_object(priv, handle, 1);
+	if (!bo) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ro = drm_lookup_ref_object(priv, &bo->base, _DRM_REF_TYPE1);
+	if (!ro) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	drm_remove_ref_object(priv, ro);
+	drm_bo_usage_deref_locked(dev, bo);
+      out:
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+}
+
+/*
+ * Call dev->struct_mutex locked.
+ */
+
+static void drm_buffer_user_object_unmap(drm_file_t * priv,
+					 drm_user_object_t * uo,
+					 drm_ref_t action)
+{
+	drm_buffer_object_t *bo =
+	    drm_user_object_entry(uo, drm_buffer_object_t, base);
+
+	/*
+	 * We DON'T want to take the bo->lock here, because we want to
+	 * hold it when we wait for unmapped buffer.
+	 */
+
+	BUG_ON(action != _DRM_REF_TYPE1);
+
+	if (atomic_add_negative(-1, &bo->mapped))
+		DRM_WAKEUP(&bo->event_queue);
+}
+
+/*
+ * bo->mutex locked. 
+ */
+
+static int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_flags,
+			      int no_wait, int force_no_move)
+{
+	int ret = 0;
+
+	/*
+	 * Flush outstanding fences.
+	 */
+	drm_bo_busy(bo);
+
+	/*
+	 * Make sure we're not mapped.
+	 */
+
+	ret = drm_bo_wait_unmapped(bo, no_wait);
+	if (ret)
+		return ret;
+
+	/*
+	 * Wait for outstanding fences.
+	 */
+
+	ret = drm_bo_wait(bo, 0, 0, no_wait);
+
+	if (ret == -EINTR)
+		return -EAGAIN;
+	if (ret)
+		return ret;
+
+	if (new_flags & DRM_BO_FLAG_MEM_TT) {
+		ret = drm_move_local_to_tt(bo, no_wait);
+		if (ret)
+			return ret;
+	} else {
+		drm_move_tt_to_local(bo, 0, force_no_move);
+	}
+
+	return 0;
+}
+
+/*
+ * bo locked.
+ */
+
+static int drm_buffer_object_validate(drm_buffer_object_t * bo,
+				      uint32_t new_flags,
+				      int move_unfenced, int no_wait)
+{
+	drm_device_t *dev = bo->dev;
+	drm_buffer_manager_t *bm = &dev->bm;
+	uint32_t flag_diff = (new_flags ^ bo->flags);
+	drm_bo_driver_t *driver = dev->driver->bo_driver;
+
+	int ret;
+
+	if (new_flags & DRM_BO_FLAG_MEM_VRAM) {
+		DRM_ERROR("Vram support not implemented yet\n");
+		return -EINVAL;
+	}
+
+	DRM_DEBUG("New flags 0x%08x, Old flags 0x%08x\n", new_flags, bo->flags);
+	ret = driver->fence_type(new_flags, &bo->fence_class, &bo->fence_type);
+	if (ret) {
+		DRM_ERROR("Driver did not support given buffer permissions\n");
+		return ret;
+	}
+
+	/*
+	 * Move out if we need to change caching policy.
+	 */
+
+	if ((flag_diff & DRM_BO_FLAG_BIND_CACHED) &&
+	    !(bo->flags & DRM_BO_FLAG_MEM_LOCAL)) {
+		if (bo->flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) {
+			DRM_ERROR("Cannot change caching policy of "
+				  "pinned buffer.\n");
+			return -EINVAL;
+		}
+		ret = drm_bo_move_buffer(bo, DRM_BO_FLAG_MEM_LOCAL, no_wait, 0);
+		if (ret) {
+			if (ret != -EAGAIN)
+				DRM_ERROR("Failed moving buffer.\n");
+			return ret;
+		}
+	}
+	DRM_MASK_VAL(bo->flags, DRM_BO_FLAG_BIND_CACHED, new_flags);
+	flag_diff = (new_flags ^ bo->flags);
+
+	/*
+	 * Check whether we dropped no_move policy, and in that case,
+	 * release reserved manager regions.
+	 */
+
+	if ((flag_diff & DRM_BO_FLAG_NO_MOVE) &&
+	    !(new_flags & DRM_BO_FLAG_NO_MOVE)) {
+		mutex_lock(&dev->struct_mutex);
+		if (bo->node_ttm) {
+			drm_mm_put_block(bo->node_ttm);
+			bo->node_ttm = NULL;
+		}
+		if (bo->node_card) {
+			drm_mm_put_block(bo->node_card);
+			bo->node_card = NULL;
+		}
+		mutex_unlock(&dev->struct_mutex);
+	}
+
+	/*
+	 * Check whether we need to move buffer.
+	 */
+
+	if ((bo->type != drm_bo_type_fake) && (flag_diff & DRM_BO_MASK_MEM)) {
+		ret = drm_bo_move_buffer(bo, new_flags, no_wait, 1);
+		if (ret) {
+			if (ret != -EAGAIN)
+				DRM_ERROR("Failed moving buffer.\n");
+			return ret;
+		}
+	}
+
+	if (move_unfenced) {
+
+		/*
+		 * Place on unfenced list.
+		 */
+
+		DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
+				_DRM_BO_FLAG_UNFENCED);
+		mutex_lock(&dev->struct_mutex);
+		list_del(&bo->lru_ttm);
+		list_add_tail(&bo->lru_ttm, &bm->unfenced);
+		list_del_init(&bo->lru_card);
+		mutex_unlock(&dev->struct_mutex);
+	} else {
+
+		mutex_lock(&dev->struct_mutex);
+		list_del_init(&bo->lru_ttm);
+		list_del_init(&bo->lru_card);
+		drm_bo_add_to_lru(bo, bm);
+		mutex_unlock(&dev->struct_mutex);
+	}
+
+	bo->flags = new_flags;
+	return 0;
+}
+
+static int drm_bo_handle_validate(drm_file_t * priv, uint32_t handle,
+				  uint32_t flags, uint32_t mask, uint32_t hint,
+				  drm_bo_arg_reply_t * rep)
+{
+	drm_buffer_object_t *bo;
+	drm_device_t *dev = priv->head->dev;
+	int ret;
+	int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
+	uint32_t new_flags;
+
+	bo = drm_lookup_buffer_object(priv, handle, 1);
+	if (!bo) {
+		return -EINVAL;
+	}
+
+	mutex_lock(&bo->mutex);
+	ret = drm_bo_wait_unfenced(bo, no_wait, 0);
+
+	if (ret)
+		goto out;
+
+	ret = drm_bo_new_flags(dev, bo->flags,
+			       (flags & mask) | (bo->mask & ~mask), hint,
+			       0, &new_flags, &bo->mask);
+
+	if (ret)
+		goto out;
+
+	ret =
+	    drm_buffer_object_validate(bo, new_flags,
+				       !(hint & DRM_BO_HINT_DONT_FENCE),
+				       no_wait);
+	drm_bo_fill_rep_arg(bo, rep);
+
+      out:
+
+	mutex_unlock(&bo->mutex);
+	drm_bo_usage_deref_unlocked(dev, bo);
+	return ret;
+}
+
+static int drm_bo_handle_info(drm_file_t * priv, uint32_t handle,
+			      drm_bo_arg_reply_t * rep)
+{
+	drm_buffer_object_t *bo;
+
+	bo = drm_lookup_buffer_object(priv, handle, 1);
+	if (!bo) {
+		return -EINVAL;
+	}
+	mutex_lock(&bo->mutex);
+	if (!(bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
+		(void)drm_bo_busy(bo);
+	drm_bo_fill_rep_arg(bo, rep);
+	mutex_unlock(&bo->mutex);
+	drm_bo_usage_deref_unlocked(bo->dev, bo);
+	return 0;
+}
+
+static int drm_bo_handle_wait(drm_file_t * priv, uint32_t handle,
+			      uint32_t hint, drm_bo_arg_reply_t * rep)
+{
+	drm_buffer_object_t *bo;
+	int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
+	int ret;
+
+	bo = drm_lookup_buffer_object(priv, handle, 1);
+	if (!bo) {
+		return -EINVAL;
+	}
+
+	mutex_lock(&bo->mutex);
+	ret = drm_bo_wait_unfenced(bo, no_wait, 0);
+	if (ret)
+		goto out;
+	ret = drm_bo_wait(bo, hint & DRM_BO_HINT_WAIT_LAZY, 0, no_wait);
+	if (ret)
+		goto out;
+
+	drm_bo_fill_rep_arg(bo, rep);
+
+      out:
+	mutex_unlock(&bo->mutex);
+	drm_bo_usage_deref_unlocked(bo->dev, bo);
+	return ret;
+}
+
+/*
+ * Call bo->mutex locked.
+ */
+
+static int drm_bo_add_ttm(drm_file_t * priv, drm_buffer_object_t * bo)
+{
+	drm_device_t *dev = bo->dev;
+	drm_ttm_object_t *to = NULL;
+	int ret = 0;
+	uint32_t ttm_flags = 0;
+
+	bo->ttm_object = NULL;
+	bo->ttm = NULL;
+
+	switch (bo->type) {
+	case drm_bo_type_dc:
+		mutex_lock(&dev->struct_mutex);
+		ret = drm_ttm_object_create(dev, bo->num_pages * PAGE_SIZE,
+					    ttm_flags, &to);
+		mutex_unlock(&dev->struct_mutex);
+		break;
+	case drm_bo_type_user:
+	case drm_bo_type_fake:
+		break;
+	default:
+		DRM_ERROR("Illegal buffer object type\n");
+		ret = -EINVAL;
+		break;
+	}
+
+	if (ret) {
+		return ret;
+	}
+
+	if (to) {
+		bo->ttm_object = to;
+		bo->ttm = drm_ttm_from_object(to);
+	}
+	return ret;
+}
+
+int drm_buffer_object_create(drm_file_t * priv,
+			     unsigned long size,
+			     drm_bo_type_t type,
+			     uint32_t mask,
+			     uint32_t hint,
+			     uint32_t page_alignment,
+			     unsigned long buffer_start,
+			     drm_buffer_object_t ** buf_obj)
+{
+	drm_device_t *dev = priv->head->dev;
+	drm_buffer_manager_t *bm = &dev->bm;
+	drm_buffer_object_t *bo;
+	int ret = 0;
+	uint32_t new_flags;
+	unsigned long num_pages;
+
+	if ((buffer_start & ~PAGE_MASK) && (type != drm_bo_type_fake)) {
+		DRM_ERROR("Invalid buffer object start.\n");
+		return -EINVAL;
+	}
+	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	if (num_pages == 0) {
+		DRM_ERROR("Illegal buffer object size.\n");
+		return -EINVAL;
+	}
+
+	bo = drm_ctl_calloc(1, sizeof(*bo), DRM_MEM_BUFOBJ);
+
+	if (!bo)
+		return -ENOMEM;
+
+	mutex_init(&bo->mutex);
+	mutex_lock(&bo->mutex);
+
+	atomic_set(&bo->usage, 1);
+	atomic_set(&bo->mapped, -1);
+	DRM_INIT_WAITQUEUE(&bo->event_queue);
+	INIT_LIST_HEAD(&bo->lru_ttm);
+	INIT_LIST_HEAD(&bo->lru_card);
+	INIT_LIST_HEAD(&bo->ddestroy);
+	bo->dev = dev;
+	bo->type = type;
+	bo->num_pages = num_pages;
+	bo->node_card = NULL;
+	bo->node_ttm = NULL;
+	bo->page_alignment = page_alignment;
+	if (bo->type == drm_bo_type_fake) {
+		bo->offset = buffer_start;
+		bo->buffer_start = 0;
+	} else {
+		bo->buffer_start = buffer_start;
+	}
+	bo->priv_flags = 0;
+	bo->flags = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED;
+	atomic_inc(&bm->count);
+	ret = drm_bo_new_flags(dev, bo->flags, mask, hint,
+			       1, &new_flags, &bo->mask);
+	if (ret)
+		goto out_err;
+	ret = drm_bo_add_ttm(priv, bo);
+	if (ret)
+		goto out_err;
+
+	ret = drm_buffer_object_validate(bo, new_flags, 0,
+					 hint & DRM_BO_HINT_DONT_BLOCK);
+	if (ret)
+		goto out_err;
+
+	mutex_unlock(&bo->mutex);
+	*buf_obj = bo;
+	return 0;
+
+      out_err:
+	mutex_unlock(&bo->mutex);
+	drm_bo_usage_deref_unlocked(dev, bo);
+	return ret;
+}
+
+static int drm_bo_add_user_object(drm_file_t * priv, drm_buffer_object_t * bo,
+				  int shareable)
+{
+	drm_device_t *dev = priv->head->dev;
+	int ret;
+
+	mutex_lock(&dev->struct_mutex);
+	ret = drm_add_user_object(priv, &bo->base, shareable);
+	if (ret)
+		goto out;
+
+	bo->base.remove = drm_bo_base_deref_locked;
+	bo->base.type = drm_buffer_type;
+	bo->base.ref_struct_locked = NULL;
+	bo->base.unref = drm_buffer_user_object_unmap;
+
+      out:
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+}
+
+static int drm_bo_lock_test(drm_device_t * dev, struct file *filp)
+{
+	LOCK_TEST_WITH_RETURN(dev, filp);
+	return 0;
+}
+
+int drm_bo_ioctl(DRM_IOCTL_ARGS)
+{
+	DRM_DEVICE;
+	drm_bo_arg_t arg;
+	drm_bo_arg_request_t *req = &arg.d.req;
+	drm_bo_arg_reply_t rep;
+	unsigned long next;
+	drm_user_object_t *uo;
+	drm_buffer_object_t *entry;
+
+	if (!dev->bm.initialized) {
+		DRM_ERROR("Buffer object manager is not initialized.\n");
+		return -EINVAL;
+	}
+
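+	/*
+	 * User space may chain several buffer-object requests together via
+	 * arg.next; each link in the chain is copied in, dispatched and
+	 * copied back, skipping links already marked handled, until the
+	 * next pointer is zero.
+	 */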
+	do {
+		DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
+
+		if (arg.handled) {
+			data = arg.next;
+			continue;
+		}
+
+		rep.ret = 0;
+		switch (req->op) {
+		case drm_bo_create:
+			rep.ret =
+			    drm_buffer_object_create(priv, req->size,
+						     req->type,
+						     req->mask,
+						     req->hint,
+						     req->page_alignment,
+						     req->buffer_start, &entry);
+			if (rep.ret)
+				break;
+
+			rep.ret =
+			    drm_bo_add_user_object(priv, entry,
+						   req->
+						   mask &
+						   DRM_BO_FLAG_SHAREABLE);
+			if (rep.ret)
+				drm_bo_usage_deref_unlocked(dev, entry);
+
+			if (rep.ret)
+				break;
+
+			mutex_lock(&entry->mutex);
+			drm_bo_fill_rep_arg(entry, &rep);
+			mutex_unlock(&entry->mutex);
+			break;
+		case drm_bo_unmap:
+			rep.ret = drm_buffer_object_unmap(priv, req->handle);
+			break;
+		case drm_bo_map:
+			rep.ret = drm_buffer_object_map(priv, req->handle,
+							req->mask,
+							req->hint, &rep);
+			break;
+		case drm_bo_destroy:
+			mutex_lock(&dev->struct_mutex);
+			uo = drm_lookup_user_object(priv, req->handle);
+			if (!uo || (uo->type != drm_buffer_type)
+			    || uo->owner != priv) {
+				mutex_unlock(&dev->struct_mutex);
+				rep.ret = -EINVAL;
+				break;
+			}
+			rep.ret = drm_remove_user_object(priv, uo);
+			mutex_unlock(&dev->struct_mutex);
+			break;
+		case drm_bo_reference:
+			rep.ret = drm_user_object_ref(priv, req->handle,
+						      drm_buffer_type, &uo);
+			if (rep.ret)
+				break;
+			mutex_lock(&dev->struct_mutex);
+			uo = drm_lookup_user_object(priv, req->handle);
+			entry =
+			    drm_user_object_entry(uo, drm_buffer_object_t,
+						  base);
+			atomic_dec(&entry->usage);
+			mutex_unlock(&dev->struct_mutex);
+			mutex_lock(&entry->mutex);
+			drm_bo_fill_rep_arg(entry, &rep);
+			mutex_unlock(&entry->mutex);
+			break;
+		case drm_bo_unreference:
+			rep.ret = drm_user_object_unref(priv, req->handle,
+							drm_buffer_type);
+			break;
+		case drm_bo_validate:
+			rep.ret = drm_bo_lock_test(dev, filp);
+
+			if (rep.ret)
+				break;
+			rep.ret =
+			    drm_bo_handle_validate(priv, req->handle, req->mask,
+						   req->arg_handle, req->hint,
+						   &rep);
+			break;
+		case drm_bo_fence:
+			rep.ret = drm_bo_lock_test(dev, filp);
+			if (rep.ret)
+				break;
+			 /**/ break;
+		case drm_bo_info:
+			rep.ret = drm_bo_handle_info(priv, req->handle, &rep);
+			break;
+		case drm_bo_wait_idle:
+			rep.ret = drm_bo_handle_wait(priv, req->handle,
+						     req->hint, &rep);
+			break;
+		case drm_bo_ref_fence:
+			rep.ret = -EINVAL;
+			DRM_ERROR("Function is not implemented yet.\n");
+		default:
+			rep.ret = -EINVAL;
+		}
+		next = arg.next;
+
+		/*
+		 * A signal interrupted us. Make sure the ioctl is restartable.
+		 */
+
+		if (rep.ret == -EAGAIN)
+			return -EAGAIN;
+
+		arg.handled = 1;
+		arg.d.rep = rep;
+		DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
+		data = next;
+	} while (data);
+	return 0;
+}
+
+/*
+ * dev->struct_mutex locked.
+ */
+
+static int drm_bo_force_list_clean(drm_device_t * dev,
+				   struct list_head *head,
+				   unsigned mem_type,
+				   int force_no_move, int allow_errors)
+{
+	drm_buffer_manager_t *bm = &dev->bm;
+	struct list_head *list, *next, *prev;
+	drm_buffer_object_t *entry;
+	int ret;
+	int clean;
+
+      retry:
+	clean = 1;
+	list_for_each_safe(list, next, head) {
+		prev = list->prev;
+		entry = drm_bo_entry(list, mem_type);
+		atomic_inc(&entry->usage);
+		mutex_unlock(&dev->struct_mutex);
+		mutex_lock(&entry->mutex);
+		mutex_lock(&dev->struct_mutex);
+
+		if (prev != list->prev || next != list->next) {
+			mutex_unlock(&entry->mutex);
+			drm_bo_usage_deref_locked(dev, entry);
+			goto retry;
+		}
+		if (drm_bo_mm_node(entry, mem_type)) {
+			clean = 0;
+
+			/*
+			 * Expire the fence.
+			 */
+
+			mutex_unlock(&dev->struct_mutex);
+			if (entry->fence && bm->nice_mode) {
+				unsigned long _end = jiffies + 3 * DRM_HZ;
+				do {
+					ret = drm_bo_wait(entry, 0, 1, 0);
+					if (ret && allow_errors) {
+						if (ret == -EINTR)
+							ret = -EAGAIN;
+						goto out_err;
+					}
+				} while (ret && !time_after_eq(jiffies, _end));
+
+				if (entry->fence) {
+					bm->nice_mode = 0;
+					DRM_ERROR("Detected GPU hang or "
+						  "fence manager was taken down. "
+						  "Evicting waiting buffers\n");
+				}
+			}
+			if (entry->fence) {
+				drm_fence_usage_deref_unlocked(dev,
+							       entry->fence);
+				entry->fence = NULL;
+			}
+
+			DRM_MASK_VAL(entry->priv_flags, _DRM_BO_FLAG_UNFENCED,
+				     0);
+
+			if (force_no_move) {
+				DRM_MASK_VAL(entry->flags, DRM_BO_FLAG_NO_MOVE,
+					     0);
+			}
+			if (entry->flags & DRM_BO_FLAG_NO_EVICT) {
+				DRM_ERROR("A DRM_BO_NO_EVICT buffer present at "
+					  "cleanup. Removing flag and evicting.\n");
+				entry->flags &= ~DRM_BO_FLAG_NO_EVICT;
+				entry->mask &= ~DRM_BO_FLAG_NO_EVICT;
+			}
+
+			ret = drm_bo_evict(entry, mem_type, 1, force_no_move);
+			if (ret) {
+				if (allow_errors) {
+					goto out_err;
+				} else {
+					DRM_ERROR("Aargh. Eviction failed.\n");
+				}
+			}
+			mutex_lock(&dev->struct_mutex);
+		}
+		mutex_unlock(&entry->mutex);
+		drm_bo_usage_deref_locked(dev, entry);
+		if (prev != list->prev || next != list->next) {
+			goto retry;
+		}
+	}
+	if (!clean)
+		goto retry;
+	return 0;
+      out_err:
+	mutex_unlock(&entry->mutex);
+	drm_bo_usage_deref_unlocked(dev, entry);
+	mutex_lock(&dev->struct_mutex);
+	return ret;
+}
+
+int drm_bo_clean_mm(drm_device_t * dev, unsigned mem_type)
+{
+	drm_buffer_manager_t *bm = &dev->bm;
+	int ret = -EINVAL;
+
+	if (mem_type >= DRM_BO_MEM_TYPES) {
+		DRM_ERROR("Illegal memory type %d\n", mem_type);
+		return ret;
+	}
+
+	if (!bm->has_type[mem_type]) {
+		DRM_ERROR("Trying to take down uninitialized "
+			  "memory manager type\n");
+		return ret;
+	}
+	bm->use_type[mem_type] = 0;
+	bm->has_type[mem_type] = 0;
+
+	ret = 0;
+	if (mem_type > 0) {
+
+		/*
+		 * Throw out unfenced buffers.
+		 */
+
+		drm_bo_force_list_clean(dev, &bm->unfenced, mem_type, 1, 0);
+
+		/*
+		 * Throw out evicted no-move buffers.
+		 */
+
+		drm_bo_force_list_clean(dev, &bm->pinned[DRM_BO_MEM_LOCAL],
+					mem_type, 1, 0);
+		drm_bo_force_list_clean(dev, &bm->lru[mem_type], mem_type, 1,
+					0);
+		drm_bo_force_list_clean(dev, &bm->pinned[mem_type], mem_type, 1,
+					0);
+
+		if (drm_mm_clean(&bm->manager[mem_type])) {
+			drm_mm_takedown(&bm->manager[mem_type]);
+		} else {
+			ret = -EBUSY;
+		}
+	}
+
+	return ret;
+}
+
+static int drm_bo_lock_mm(drm_device_t * dev, unsigned mem_type)
+{
+	int ret;
+	drm_buffer_manager_t *bm = &dev->bm;
+
+	if (mem_type == 0 || mem_type >= DRM_BO_MEM_TYPES) {
+		DRM_ERROR("Illegal memory manager memory type %u.\n", mem_type);
+		return -EINVAL;
+	}
+
+	ret = drm_bo_force_list_clean(dev, &bm->unfenced, mem_type, 0, 1);
+	if (ret)
+		return ret;
+	ret = drm_bo_force_list_clean(dev, &bm->lru[mem_type], mem_type, 0, 1);
+	if (ret)
+		return ret;
+	ret =
+	    drm_bo_force_list_clean(dev, &bm->pinned[mem_type], mem_type, 0, 1);
+	return ret;
+}
+
+static int drm_bo_init_mm(drm_device_t * dev,
+			  unsigned type,
+			  unsigned long p_offset, unsigned long p_size)
+{
+	drm_buffer_manager_t *bm = &dev->bm;
+	int ret = -EINVAL;
+
+	if (type >= DRM_BO_MEM_TYPES) {
+		DRM_ERROR("Illegal memory type %d\n", type);
+		return ret;
+	}
+	if (bm->has_type[type]) {
+		DRM_ERROR("Memory manager already initialized for type %d\n",
+			  type);
+		return ret;
+	}
+
+	ret = 0;
+	if (type != DRM_BO_MEM_LOCAL) {
+		if (!p_size) {
+			DRM_ERROR("Zero size memory manager type %d\n", type);
+			return ret;
+		}
+		ret = drm_mm_init(&bm->manager[type], p_offset, p_size);
+		if (ret)
+			return ret;
+	}
+	bm->has_type[type] = 1;
+	bm->use_type[type] = 1;
+
+	INIT_LIST_HEAD(&bm->lru[type]);
+	INIT_LIST_HEAD(&bm->pinned[type]);
+
+	return 0;
+}
+
+/*
+ * This is called from lastclose, so we don't need to bother about
+ * any clients still running when we set the initialized flag to zero.
+ */
+
+int drm_bo_driver_finish(drm_device_t * dev)
+{
+	drm_buffer_manager_t *bm = &dev->bm;
+	int ret = 0;
+	unsigned i = DRM_BO_MEM_TYPES;
+
+	mutex_lock(&dev->bm.init_mutex);
+	mutex_lock(&dev->struct_mutex);
+
+	if (!bm->initialized)
+		goto out;
+	bm->initialized = 0;
+
+	while (i--) {
+		if (bm->has_type[i]) {
+			bm->use_type[i] = 0;
+			if ((i != DRM_BO_MEM_LOCAL) && drm_bo_clean_mm(dev, i)) {
+				ret = -EBUSY;
+				DRM_ERROR("DRM memory manager type %d "
+					  "is not clean.\n", i);
+			}
+			bm->has_type[i] = 0;
+		}
+	}
+	mutex_unlock(&dev->struct_mutex);
+	if (!cancel_delayed_work(&bm->wq)) {
+		flush_scheduled_work();
+	}
+	mutex_lock(&dev->struct_mutex);
+	drm_bo_delayed_delete(dev, 1);
+	if (list_empty(&bm->ddestroy)) {
+		DRM_DEBUG("Delayed destroy list was clean\n");
+	}
+	if (list_empty(&bm->lru[0])) {
+		DRM_DEBUG("Swap list was clean\n");
+	}
+	if (list_empty(&bm->pinned[0])) {
+		DRM_DEBUG("NO_MOVE list was clean\n");
+	}
+	if (list_empty(&bm->unfenced)) {
+		DRM_DEBUG("Unfenced list was clean\n");
+	}
+      out:
+	mutex_unlock(&dev->struct_mutex);
+	mutex_unlock(&dev->bm.init_mutex);
+	return ret;
+}
+
+int drm_bo_driver_init(drm_device_t * dev)
+{
+	drm_bo_driver_t *driver = dev->driver->bo_driver;
+	drm_buffer_manager_t *bm = &dev->bm;
+	int ret = -EINVAL;
+
+	mutex_lock(&dev->bm.init_mutex);
+	mutex_lock(&dev->struct_mutex);
+	if (!driver)
+		goto out_unlock;
+
+	/*
+	 * Initialize the system memory buffer type.
+	 * Other types need to be driver / IOCTL initialized.
+	 */
+
+	ret = drm_bo_init_mm(dev, 0, 0, 0);
+	if (ret)
+		goto out_unlock;
+
+	INIT_WORK(&bm->wq, &drm_bo_delayed_workqueue, dev);
+	bm->initialized = 1;
+	bm->nice_mode = 1;
+	atomic_set(&bm->count, 0);
+	bm->cur_pages = 0;
+	INIT_LIST_HEAD(&bm->unfenced);
+	INIT_LIST_HEAD(&bm->ddestroy);
+      out_unlock:
+	mutex_unlock(&dev->struct_mutex);
+	mutex_unlock(&dev->bm.init_mutex);
+	return ret;
+}
+
+EXPORT_SYMBOL(drm_bo_driver_init);
+
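+/*
+ * Ioctl entry point for initializing, taking down, locking and unlocking
+ * a buffer-manager memory type (mm_init / mm_takedown / mm_lock / mm_unlock).
+ */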
+int drm_mm_init_ioctl(DRM_IOCTL_ARGS)
+{
+	DRM_DEVICE;
+
+	int ret = 0;
+	drm_mm_init_arg_t arg;
+	drm_buffer_manager_t *bm = &dev->bm;
+	drm_bo_driver_t *driver = dev->driver->bo_driver;
+
+	if (!driver) {
+		DRM_ERROR("Buffer objects are not supported by this driver\n");
+		return -EINVAL;
+	}
+
+	DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
+
+	switch (arg.req.op) {
+	case mm_init:
+		ret = -EINVAL;
+		mutex_lock(&dev->bm.init_mutex);
+		mutex_lock(&dev->struct_mutex);
+		if (!bm->initialized) {
+			DRM_ERROR("DRM memory manager was not initialized.\n");
+			break;
+		}
+		if (arg.req.mem_type == 0) {
+			DRM_ERROR
+			    ("System memory buffers already initialized.\n");
+			break;
+		}
+		ret = drm_bo_init_mm(dev, arg.req.mem_type,
+				     arg.req.p_offset, arg.req.p_size);
+		break;
+	case mm_takedown:
+		LOCK_TEST_WITH_RETURN(dev, filp);
+		mutex_lock(&dev->bm.init_mutex);
+		mutex_lock(&dev->struct_mutex);
+		ret = -EINVAL;
+		if (!bm->initialized) {
+			DRM_ERROR("DRM memory manager was not initialized\n");
+			break;
+		}
+		if (arg.req.mem_type == 0) {
+			DRM_ERROR("No takedown for System memory buffers.\n");
+			break;
+		}
+		ret = 0;
+		if (drm_bo_clean_mm(dev, arg.req.mem_type)) {
+			DRM_ERROR("Memory manager type %d not clean. "
+				  "Delaying takedown\n", arg.req.mem_type);
+		}
+		break;
+	case mm_lock:
+		LOCK_TEST_WITH_RETURN(dev, filp);
+		mutex_lock(&dev->bm.init_mutex);
+		mutex_lock(&dev->struct_mutex);
+		ret = drm_bo_lock_mm(dev, arg.req.mem_type);
+		break;
+	case mm_unlock:
+		LOCK_TEST_WITH_RETURN(dev, filp);
+		mutex_lock(&dev->bm.init_mutex);
+		mutex_lock(&dev->struct_mutex);
+		ret = 0;
+		break;
+	default:
+		DRM_ERROR("Function not implemented yet\n");
+		return -EINVAL;
+	}
+
+	mutex_unlock(&dev->struct_mutex);
+	mutex_unlock(&dev->bm.init_mutex);
+	if (ret)
+		return ret;
+
+	DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
+	return 0;
+}
diff --git a/linux-core/drm_bufs.c b/linux-core/drm_bufs.c
index a2a3dbf..d6ebc8d 100644
--- a/linux-core/drm_bufs.c
+++ b/linux-core/drm_bufs.c
@@ -80,14 +80,14 @@
 
 	if (!use_hashed_handle) {
 		int ret;
-		hash->key = user_token;
+		hash->key = user_token >> PAGE_SHIFT;
 		ret = drm_ht_insert_item(&dev->map_hash, hash);
 		if (ret != -EINVAL) 
 			return ret;
 	}
 	return drm_ht_just_insert_please(&dev->map_hash, hash, 
 					 user_token, 32 - PAGE_SHIFT - 3,
-					 PAGE_SHIFT, DRM_MAP_HASH_OFFSET);
+					 0, DRM_MAP_HASH_OFFSET >> PAGE_SHIFT);
 }
 
 /**
@@ -301,7 +301,7 @@
 		return ret;
 	}
 
-	list->user_token = list->hash.key;
+	list->user_token = list->hash.key << PAGE_SHIFT;
 	mutex_unlock(&dev->struct_mutex);
 
 	*maplist = list;
@@ -386,7 +386,8 @@
 
 		if (r_list->map == map) {
 			list_del(list);
-			drm_ht_remove_key(&dev->map_hash, r_list->user_token);
+			drm_ht_remove_key(&dev->map_hash, 
+					  r_list->user_token >> PAGE_SHIFT);
 			drm_free(list, sizeof(*list), DRM_MEM_MAPS);
 			break;
 		}
@@ -422,6 +423,8 @@
 		dmah.size = map->size;
 		__drm_pci_free(dev, &dmah);
 		break;
+	case _DRM_TTM:
+		BUG_ON(1);
 	}
 	drm_free(map, sizeof(*map), DRM_MEM_MAPS);
 
diff --git a/linux-core/drm_compat.c b/linux-core/drm_compat.c
new file mode 100644
index 0000000..b466f8b
--- /dev/null
+++ b/linux-core/drm_compat.c
@@ -0,0 +1,434 @@
+/**************************************************************************
+ * 
+ * This kernel module is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * 
+ **************************************************************************/
+/*
+ * This code provides access to unexported mm kernel features. It is needed in
+ * order to use the new DRM memory manager code with kernels that don't support it
+ * directly.
+ *
+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
+ *          Linux kernel mm subsystem authors. 
+ *          (Most code taken from there).
+ */
+
+#include "drmP.h"
+
+#if defined(CONFIG_X86) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
+
+/*
+ * These have bad performance in the AGP module for the indicated kernel versions.
+ */
+
+int drm_map_page_into_agp(struct page *page)
+{
+        int i;
+        i = change_page_attr(page, 1, PAGE_KERNEL_NOCACHE);
+        /* Caller's responsibility to call global_flush_tlb() for
+         * performance reasons */
+        return i;
+}
+
+int drm_unmap_page_from_agp(struct page *page)
+{
+        int i;
+        i = change_page_attr(page, 1, PAGE_KERNEL);
+        /* Caller's responsibility to call global_flush_tlb() for
+         * performance reasons */
+        return i;
+}
+#endif 
+
+
+#if  (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
+
+/*
+ * The protection map was exported in 2.6.19
+ */
+
+pgprot_t vm_get_page_prot(unsigned long vm_flags)
+{
+#ifdef MODULE
+	static pgprot_t drm_protection_map[16] = {
+		__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
+		__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
+	};
+
+	return drm_protection_map[vm_flags & 0x0F];
+#else
+	extern pgprot_t protection_map[];
+	return protection_map[vm_flags & 0x0F];
+#endif
+};
+#endif
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
+
+/*
+ * vm code for kernels below 2.6.15, in which version a major vm rewrite
+ * occurred. This implements a simple, straightforward
+ * version similar to what's going to be
+ * in kernel 2.6.20+(?).
+ */ 
+
+static int drm_pte_is_clear(struct vm_area_struct *vma,
+			    unsigned long addr)
+{
+	struct mm_struct *mm = vma->vm_mm;
+	int ret = 1;
+	pte_t *pte;
+	pmd_t *pmd;
+	pud_t *pud;
+	pgd_t *pgd;
+	
+
+	spin_lock(&mm->page_table_lock);
+	pgd = pgd_offset(mm, addr);
+	if (pgd_none(*pgd))
+		goto unlock;
+	pud = pud_offset(pgd, addr);
+	if (pud_none(*pud))
+		goto unlock;
+	pmd = pmd_offset(pud, addr);
+	if (pmd_none(*pmd))
+		goto unlock;
+	pte = pte_offset_map(pmd, addr);
+	if (!pte)
+		goto unlock;
+	ret = pte_none(*pte);
+	pte_unmap(pte);
+ unlock:	
+	spin_unlock(&mm->page_table_lock);
+	return ret;
+}
+	
+int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr, 
+		  unsigned long pfn, pgprot_t pgprot)
+{
+	int ret;
+	if (!drm_pte_is_clear(vma, addr))
+		return -EBUSY;
+
+	ret = io_remap_pfn_range(vma, addr, pfn, PAGE_SIZE, pgprot);
+	return ret;
+}
+
+static struct {
+	spinlock_t lock;
+	struct page *dummy_page;
+	atomic_t present;
+} drm_np_retry = 
+{SPIN_LOCK_UNLOCKED, NOPAGE_OOM, ATOMIC_INIT(0)};
+
+struct page * get_nopage_retry(void)
+{
+	if (atomic_read(&drm_np_retry.present) == 0) {
+		struct page *page = alloc_page(GFP_KERNEL);
+		if (!page)
+			return NOPAGE_OOM;
+		spin_lock(&drm_np_retry.lock);
+		drm_np_retry.dummy_page = page;
+		atomic_set(&drm_np_retry.present,1);
+		spin_unlock(&drm_np_retry.lock);
+	}
+	get_page(drm_np_retry.dummy_page);
+	return drm_np_retry.dummy_page;
+}
+
+void free_nopage_retry(void)
+{
+	if (atomic_read(&drm_np_retry.present) == 1) {
+		spin_lock(&drm_np_retry.lock);
+		__free_page(drm_np_retry.dummy_page);
+		drm_np_retry.dummy_page = NULL;
+		atomic_set(&drm_np_retry.present, 0);
+		spin_unlock(&drm_np_retry.lock);
+	}
+}
+
+struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma,
+			       unsigned long address, 
+			       int *type)
+{
+	struct fault_data data;
+
+	if (type)
+		*type = VM_FAULT_MINOR;
+
+	data.address = address;
+	data.vma = vma;
+	drm_vm_ttm_fault(vma, &data);
+	switch (data.type) {
+	case VM_FAULT_OOM:
+		return NOPAGE_OOM;
+	case VM_FAULT_SIGBUS:
+		return NOPAGE_SIGBUS;
+	default:
+		break;
+	}
+
+	return NOPAGE_REFAULT;
+}
+
+#endif
+
+#ifdef DRM_ODD_MM_COMPAT
+
+/*
+ * VM compatibility code for 2.6.15-2.6.19(?). This code implements a complicated
+ * workaround for a single BUG statement in do_no_page in these versions. The
+ * tricky thing is that we need to take the mmap_sem in exclusive mode for _all_
+ * vmas mapping the ttm, before dev->struct_mutex is taken. The way we do this is to
+ * first take the dev->struct_mutex, and then trylock all mmap_sems. If this
+ * fails for a single mmap_sem, we have to release all sems and the dev->struct_mutex,
+ * release the cpu and retry. We also need to keep track of all vmas mapping the ttm.
+ * phew.
+ */
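+
+/*
+ * Sketch of the retry protocol described above (hypothetical caller, not a
+ * function in this file):
+ *
+ *	mutex_lock(&dev->struct_mutex);
+ *	while (drm_ttm_lock_mm(ttm) == -EAGAIN) {
+ *		mutex_unlock(&dev->struct_mutex);
+ *		schedule();
+ *		mutex_lock(&dev->struct_mutex);
+ *	}
+ *	... unmap or remap the ttm pages ...
+ *	drm_ttm_unlock_mm(ttm);
+ */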
+
+typedef struct p_mm_entry {
+	struct list_head head;
+	struct mm_struct *mm;
+	atomic_t refcount;
+        int locked;
+} p_mm_entry_t;
+
+typedef struct vma_entry {
+	struct list_head head;
+	struct vm_area_struct *vma;
+} vma_entry_t;
+
+
+struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma,
+			       unsigned long address, 
+			       int *type)
+{
+	drm_local_map_t *map = (drm_local_map_t *) vma->vm_private_data;
+	unsigned long page_offset;
+	struct page *page;
+	drm_ttm_t *ttm; 
+	drm_buffer_manager_t *bm;
+	drm_device_t *dev;
+
+	/*
+	 * FIXME: Check can't map aperture flag.
+	 */
+
+	if (type)
+		*type = VM_FAULT_MINOR;
+
+	if (!map) 
+		return NOPAGE_OOM;
+
+	if (address > vma->vm_end) 
+		return NOPAGE_SIGBUS;
+
+	ttm = (drm_ttm_t *) map->offset;	
+	dev = ttm->dev;
+	mutex_lock(&dev->struct_mutex);
+	drm_fixup_ttm_caching(ttm);
+	BUG_ON(ttm->page_flags & DRM_TTM_PAGE_UNCACHED);
+
+	bm = &dev->bm;
+	page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
+	page = ttm->pages[page_offset];
+
+	if (!page) {
+		if (drm_alloc_memctl(PAGE_SIZE)) {
+			page = NOPAGE_OOM;
+			goto out;
+		}
+		page = ttm->pages[page_offset] = drm_alloc_gatt_pages(0);
+		if (!page) {
+		        drm_free_memctl(PAGE_SIZE);
+			page = NOPAGE_OOM;
+			goto out;
+		}
+		++bm->cur_pages;
+		SetPageLocked(page);
+	}
+
+	get_page(page);
+ out:
+	mutex_unlock(&dev->struct_mutex);
+	return page;
+}
+
+
+
+
+int drm_ttm_map_bound(struct vm_area_struct *vma)
+{
+	drm_local_map_t *map = (drm_local_map_t *)vma->vm_private_data;
+	drm_ttm_t *ttm = (drm_ttm_t *) map->offset;
+	int ret = 0;
+
+	if (ttm->page_flags & DRM_TTM_PAGE_UNCACHED) {
+		unsigned long pfn = ttm->aper_offset + 
+			(ttm->be->aperture_base >> PAGE_SHIFT);
+		pgprot_t pgprot = drm_io_prot(ttm->be->drm_map_type, vma);
+		
+		ret = io_remap_pfn_range(vma, vma->vm_start, pfn,
+					 vma->vm_end - vma->vm_start,
+					 pgprot);
+	}
+	return ret;
+}
+	
+
+int drm_ttm_add_vma(drm_ttm_t * ttm, struct vm_area_struct *vma)
+{
+	p_mm_entry_t *entry, *n_entry;
+	vma_entry_t *v_entry;
+	drm_local_map_t *map = (drm_local_map_t *)
+		vma->vm_private_data;
+	struct mm_struct *mm = vma->vm_mm;
+
+	v_entry = drm_ctl_alloc(sizeof(*v_entry), DRM_MEM_TTM);
+	if (!v_entry) {
+		DRM_ERROR("Allocation of vma pointer entry failed\n");
+		return -ENOMEM;
+	}
+	v_entry->vma = vma;
+	map->handle = (void *) v_entry;
+	list_add_tail(&v_entry->head, &ttm->vma_list);
+
+	list_for_each_entry(entry, &ttm->p_mm_list, head) {
+		if (mm == entry->mm) {
+			atomic_inc(&entry->refcount);
+			return 0;
+		} else if ((unsigned long)mm < (unsigned long)entry->mm) ;
+	}
+
+	n_entry = drm_ctl_alloc(sizeof(*n_entry), DRM_MEM_TTM);
+	if (!n_entry) {
+		DRM_ERROR("Allocation of process mm pointer entry failed\n");
+		return -ENOMEM;
+	}
+	INIT_LIST_HEAD(&n_entry->head);
+	n_entry->mm = mm;
+	n_entry->locked = 0;
+	atomic_set(&n_entry->refcount, 0);
+	list_add_tail(&n_entry->head, &entry->head);
+
+	return 0;
+}
+
+void drm_ttm_delete_vma(drm_ttm_t * ttm, struct vm_area_struct *vma)
+{
+	p_mm_entry_t *entry, *n;
+	vma_entry_t *v_entry, *v_n;
+	int found = 0;
+	struct mm_struct *mm = vma->vm_mm;
+
+	list_for_each_entry_safe(v_entry, v_n, &ttm->vma_list, head) {
+		if (v_entry->vma == vma) {
+			found = 1;
+			list_del(&v_entry->head);
+			drm_ctl_free(v_entry, sizeof(*v_entry), DRM_MEM_TTM);
+			break;
+		}
+	}
+	BUG_ON(!found);
+
+	list_for_each_entry_safe(entry, n, &ttm->p_mm_list, head) {
+		if (mm == entry->mm) {
+			if (atomic_add_negative(-1, &entry->refcount)) {
+				list_del(&entry->head);
+				BUG_ON(entry->locked);
+				drm_ctl_free(entry, sizeof(*entry), DRM_MEM_TTM);
+			}
+			return;
+		}
+	}
+	BUG_ON(1);
+}
+
+
+
+int drm_ttm_lock_mm(drm_ttm_t * ttm)
+{
+	p_mm_entry_t *entry;
+	int lock_ok = 1;
+	
+	list_for_each_entry(entry, &ttm->p_mm_list, head) {
+		BUG_ON(entry->locked);
+		if (!down_write_trylock(&entry->mm->mmap_sem)) {
+			lock_ok = 0;
+			break;
+		}
+		entry->locked = 1;
+	}
+
+	if (lock_ok)
+		return 0;
+
+	list_for_each_entry(entry, &ttm->p_mm_list, head) {
+		if (!entry->locked) 
+			break;
+		up_write(&entry->mm->mmap_sem);
+		entry->locked = 0;
+	}
+
+	/*
+	 * Possible deadlock. Try again. Our callers should handle this
+	 * and restart.
+	 */
+
+	return -EAGAIN;
+}
+
+void drm_ttm_unlock_mm(drm_ttm_t * ttm)
+{
+	p_mm_entry_t *entry;
+	
+	list_for_each_entry(entry, &ttm->p_mm_list, head) {
+		BUG_ON(!entry->locked);
+		up_write(&entry->mm->mmap_sem);
+		entry->locked = 0;
+	}
+}
+
+int drm_ttm_remap_bound(drm_ttm_t *ttm) 
+{
+	vma_entry_t *v_entry;
+	int ret = 0;
+	
+	list_for_each_entry(v_entry, &ttm->vma_list, head) {
+		ret = drm_ttm_map_bound(v_entry->vma);
+		if (ret)
+			break;
+	}
+
+	drm_ttm_unlock_mm(ttm);
+	return ret;
+}
+
+void drm_ttm_finish_unmap(drm_ttm_t *ttm)
+{
+	vma_entry_t *v_entry;
+	
+	if (!(ttm->page_flags & DRM_TTM_PAGE_UNCACHED))
+		return;
+
+	list_for_each_entry(v_entry, &ttm->vma_list, head) {
+		v_entry->vma->vm_flags &= ~VM_PFNMAP; 
+	}
+	drm_ttm_unlock_mm(ttm);
+}	
+
+#endif
+
diff --git a/linux-core/drm_compat.h b/linux-core/drm_compat.h
index 407853d..a1a9439 100644
--- a/linux-core/drm_compat.h
+++ b/linux-core/drm_compat.h
@@ -31,6 +31,7 @@
  * OTHER DEALINGS IN THE SOFTWARE.
  */
 
+#include <asm/agp.h>
 #ifndef _DRM_COMPAT_H_
 #define _DRM_COMPAT_H_
 
@@ -227,4 +228,152 @@
 }
 #endif
 
+#include <linux/mm.h>
+#include <asm/page.h>
+
+#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)) && \
+     (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))) 
+#define DRM_ODD_MM_COMPAT
+#endif
+
+
+
+/*
+ * Flush relevant caches and clear a VMA structure so that page references 
+ * will cause a page fault. Don't flush TLBs.
+ */
+
+extern void drm_clear_vma(struct vm_area_struct *vma,
+			  unsigned long addr, unsigned long end);
+
+/*
+ * Return the PTE protection map entries for the VMA flags given by 
+ * flags. This is a functional interface to the kernel's protection map.
+ */
+
+extern pgprot_t vm_get_page_prot(unsigned long vm_flags);
+
+/*
+ * These are similar to the current kernel gatt pages allocator, except that
+ * we want a struct page pointer instead of a virtual address. This allows for
+ * pages that are not in the kernel linear map.
+ */
+
+#define drm_alloc_gatt_pages(order) ({					\
+			void *_virt = alloc_gatt_pages(order);		\
+			((_virt) ? virt_to_page(_virt) : NULL);})
+#define drm_free_gatt_pages(pages, order) free_gatt_pages(page_address(pages), order) 
+
+#if defined(CONFIG_X86) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
+
+/*
+ * These are too slow in earlier kernels.
+ */
+
+extern int drm_unmap_page_from_agp(struct page *page);
+extern int drm_map_page_into_agp(struct page *page);
+
+#define map_page_into_agp drm_map_page_into_agp
+#define unmap_page_from_agp drm_unmap_page_from_agp
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
+extern struct page *get_nopage_retry(void);
+extern void free_nopage_retry(void);
+struct fault_data;
+extern struct page *drm_vm_ttm_fault(struct vm_area_struct *vma, 
+				     struct fault_data *data);
+
+#define NOPAGE_REFAULT get_nopage_retry()
+#endif
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
+
+/*
+ * Hopefully, real NOPAGE_RETRY functionality will be in 2.6.19. 
+ * For now, just return a dummy page that we've allocated out of 
+ * static space. The page will be put by do_nopage() since we've already
+ * filled out the pte.
+ */
+
+struct fault_data {
+	struct vm_area_struct *vma;
+	unsigned long address;
+	pgoff_t pgoff;
+	unsigned int flags;
+	
+	int type;
+};
+
+
+extern int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr, 
+			 unsigned long pfn, pgprot_t pgprot);
+
+extern struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma,
+				      unsigned long address, 
+				      int *type);
+
+#endif
+
+#ifdef DRM_ODD_MM_COMPAT
+
+struct drm_ttm;
+
+
+/*
+ * Add a vma to the ttm vma list, and the 
+ * process mm pointer to the ttm mm list. Needs the ttm mutex.
+ */
+
+extern int drm_ttm_add_vma(struct drm_ttm * ttm, 
+			   struct vm_area_struct *vma);
+/*
+ * Delete a vma and the corresponding mm pointer from the
+ * ttm lists. Needs the ttm mutex.
+ */
+extern void drm_ttm_delete_vma(struct drm_ttm * ttm, 
+			       struct vm_area_struct *vma);
+
+/*
+ * Attempts to lock all relevant mmap_sems for a ttm, while
+ * not releasing the ttm mutex. May return -EAGAIN to avoid 
+ * deadlocks. In that case the caller shall release the ttm mutex,
+ * schedule() and try again.
+ */
+
+extern int drm_ttm_lock_mm(struct drm_ttm * ttm);
+
+/*
+ * Unlock all relevant mmap_sems for a ttm.
+ */
+extern void drm_ttm_unlock_mm(struct drm_ttm * ttm);
+
+/*
+ * If the ttm was bound to the aperture, this function shall be called
+ * with all relevant mmap_sems held. It deletes the VM_PFNMAP flag from all
+ * vmas mapping this ttm. This is needed just after unmapping the ptes of
+ * the vma, otherwise do_nopage() will BUG(). The function
+ * releases the mmap_sems for this ttm.
+ */
+
+extern void drm_ttm_finish_unmap(struct drm_ttm *ttm);
+
+/*
+ * Remap all vmas of this ttm using io_remap_pfn_range. We cannot
+ * fault these pfns in, because the first fault will set the vma VM_PFNMAP
+ * flag, which will make the next fault BUG() in do_nopage(). The function
+ * releases the mmap_sems for this ttm.
+ */
+
+extern int drm_ttm_remap_bound(struct drm_ttm *ttm);
+
+
+/*
+ * Remap a vma for a bound ttm. Call with the ttm mutex held and
+ * the relevant mmap_sem locked.
+ */
+extern int drm_ttm_map_bound(struct vm_area_struct *vma);
+
+#endif
 #endif
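The -EAGAIN convention for drm_ttm_lock_mm() implies a retry loop on the caller's side, as the comments above prescribe. A minimal caller-side sketch; the mutex dropped between attempts is whichever ttm mutex the caller holds, which this patch does not name, so the identifier below is a placeholder:

	/* Illustrative retry loop; "ttm_mutex" is a placeholder, not from this patch. */
	int ret;

	do {
		ret = drm_ttm_lock_mm(ttm);
		if (ret == -EAGAIN) {
			mutex_unlock(&ttm_mutex);	/* release the ttm mutex,  */
			schedule();			/* let other tasks run,    */
			mutex_lock(&ttm_mutex);		/* then retry the lock.    */
		}
	} while (ret == -EAGAIN);
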
diff --git a/linux-core/drm_drawable.c b/linux-core/drm_drawable.c
deleted file mode 120000
index d64bbe1..0000000
--- a/linux-core/drm_drawable.c
+++ /dev/null
@@ -1 +0,0 @@
-../shared-core/drm_drawable.c
\ No newline at end of file
diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c
index 228c8b8..518e2aa 100644
--- a/linux-core/drm_drv.c
+++ b/linux-core/drm_drv.c
@@ -119,12 +119,17 @@
 	[DRM_IOCTL_NR(DRM_IOCTL_SG_FREE)] = {drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
 
 	[DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK)] = {drm_wait_vblank, 0},
+	[DRM_IOCTL_NR(DRM_IOCTL_FENCE)] = {drm_fence_ioctl, DRM_AUTH},
+	[DRM_IOCTL_NR(DRM_IOCTL_BUFOBJ)] = {drm_bo_ioctl, DRM_AUTH},
+	[DRM_IOCTL_NR(DRM_IOCTL_MM_INIT)] = {drm_mm_init_ioctl, 
+					     DRM_AUTH },
 
 	[DRM_IOCTL_NR(DRM_IOCTL_UPDATE_DRAW)] = {drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
 };
 
 #define DRIVER_IOCTL_COUNT	ARRAY_SIZE( drm_ioctls )
 
+
 /**
  * Take down the DRM device.
  *
@@ -143,6 +148,11 @@
 
 	DRM_DEBUG("\n");
 
+	if (drm_bo_driver_finish(dev)) {
+		DRM_ERROR("DRM memory manager still busy. "
+			  "System is unstable. Please reboot.\n");
+	}
+
 	if (dev->driver->lastclose)
 		dev->driver->lastclose(dev);
 	DRM_DEBUG("driver lastclose completed\n");
@@ -218,7 +228,7 @@
 	if (dev->vmalist) {
 		for (vma = dev->vmalist; vma; vma = vma_next) {
 			vma_next = vma->next;
-			drm_free(vma, sizeof(*vma), DRM_MEM_VMAS);
+			drm_ctl_free(vma, sizeof(*vma), DRM_MEM_VMAS);
 		}
 		dev->vmalist = NULL;
 	}
@@ -256,6 +266,7 @@
 		dev->lock.filp = NULL;
 		wake_up_interruptible(&dev->lock.lock_queue);
 	}
+	dev->dev_mapping = NULL;
 	mutex_unlock(&dev->struct_mutex);
 
 	DRM_DEBUG("lastclose completed\n");
@@ -360,11 +371,14 @@
 	}
 
 	drm_lastclose(dev);
+	drm_fence_manager_takedown(dev);
 
 	if (dev->maplist) {
 		drm_free(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);
 		dev->maplist = NULL;
 		drm_ht_remove(&dev->map_hash);
+		drm_mm_takedown(&dev->offset_manager);
+		drm_ht_remove(&dev->object_hash);
 	}
 
 	if (!drm_fb_loaded)
@@ -419,6 +433,9 @@
 		}
 	} else
 		pci_unregister_driver(&driver->pci_driver);
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
+	free_nopage_retry();
+#endif
 	DRM_INFO("Module unloaded\n");
 }
 EXPORT_SYMBOL(drm_exit);
@@ -429,10 +446,64 @@
 	.open = drm_stub_open
 };
 
+static int drm_create_memory_caches(void)
+{
+	drm_cache.mm = kmem_cache_create("drm_mm_node_t", 
+					 sizeof(drm_mm_node_t),
+					 0,
+					 SLAB_HWCACHE_ALIGN,
+					 NULL,NULL);
+	if (!drm_cache.mm)
+		return -ENOMEM;
+
+	drm_cache.fence_object= kmem_cache_create("drm_fence_object_t", 
+						  sizeof(drm_fence_object_t),
+						  0,
+						  SLAB_HWCACHE_ALIGN,
+						  NULL,NULL);
+	if (!drm_cache.fence_object)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void drm_free_mem_cache(kmem_cache_t *cache, 
+			       const char *name)
+{
+	if (!cache)
+		return;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
+	if (kmem_cache_destroy(cache)) {
+		DRM_ERROR("Warning! DRM is leaking %s memory.\n",
+			  name);
+	}
+#else
+	kmem_cache_destroy(cache);
+#endif
+}
+
+static void drm_free_memory_caches(void )
+{
+	
+	drm_free_mem_cache(drm_cache.fence_object, "fence object");
+	drm_cache.fence_object = NULL;
+	drm_free_mem_cache(drm_cache.mm, "memory manager block");
+	drm_cache.mm = NULL;
+}
+
+
 static int __init drm_core_init(void)
 {
-	int ret = -ENOMEM;
+	int ret;
+	struct sysinfo si;
+	
+	si_meminfo(&si);
+	drm_init_memctl(si.totalram/2, si.totalram*3/4);
+	ret = drm_create_memory_caches();
+	if (ret)
+		goto err_p1;
 
+	ret = -ENOMEM;
 	drm_cards_limit =
 	    (drm_cards_limit < DRM_MAX_MINOR + 1 ? drm_cards_limit : DRM_MAX_MINOR + 1);
 	drm_heads = drm_calloc(drm_cards_limit, sizeof(*drm_heads), DRM_MEM_STUB);
@@ -468,11 +539,13 @@
 	unregister_chrdev(DRM_MAJOR, "drm");
 	drm_free(drm_heads, sizeof(*drm_heads) * drm_cards_limit, DRM_MEM_STUB);
 err_p1:
+	drm_free_memory_caches();
 	return ret;
 }
 
 static void __exit drm_core_exit(void)
 {
+	drm_free_memory_caches();
 	remove_proc_entry("dri", NULL);
 	drm_sysfs_destroy(drm_class);
 
@@ -549,13 +622,18 @@
 		  current->pid, cmd, nr, (long)old_encode_dev(priv->head->device),
 		  priv->authenticated);
 
-	if (nr < DRIVER_IOCTL_COUNT)
-		ioctl = &drm_ioctls[nr];
-	else if ((nr >= DRM_COMMAND_BASE)
-		 && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls))
-		ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE];
-	else
+	if (nr >= DRIVER_IOCTL_COUNT && 
+	    (nr < DRM_COMMAND_BASE || nr >= DRM_COMMAND_END))
 		goto err_i1;
+	if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
+		&& (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls))
+			ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE];
+	else if (nr >= DRM_COMMAND_END || nr < DRM_COMMAND_BASE)	
+		ioctl = &drm_ioctls[nr];
+	else 
+		goto err_i1;
+
+
 
 	func = ioctl->func;
 	if ((nr == DRM_IOCTL_NR(DRM_IOCTL_DMA)) && dev->driver->dma_ioctl)	/* Local override? */
diff --git a/linux-core/drm_fence.c b/linux-core/drm_fence.c
new file mode 100644
index 0000000..f656340
--- /dev/null
+++ b/linux-core/drm_fence.c
@@ -0,0 +1,619 @@
+/**************************************************************************
+ * 
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
+ * All Rights Reserved.
+ * 
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ * 
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ * 
+ * 
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#include "drmP.h"
+
+/*
+ * Typically called by the IRQ handler.
+ */
+
+void drm_fence_handler(drm_device_t * dev, uint32_t sequence, uint32_t type)
+{
+	int wake = 0;
+	uint32_t diff;
+	uint32_t relevant;
+	drm_fence_manager_t *fm = &dev->fm;
+	drm_fence_driver_t *driver = dev->driver->fence_driver;
+	struct list_head *list, *prev;
+	drm_fence_object_t *fence;
+	int found = 0;
+
+	if (list_empty(&fm->ring))
+		return;
+
+	list_for_each_entry(fence, &fm->ring, ring) {
+		diff = (sequence - fence->sequence) & driver->sequence_mask;
+		if (diff > driver->wrap_diff) {
+			found = 1;
+			break;
+		}
+	}
+
+	list = (found) ? fence->ring.prev : fm->ring.prev;
+	prev = list->prev;
+
+	for (; list != &fm->ring; list = prev, prev = list->prev) {
+		fence = list_entry(list, drm_fence_object_t, ring);
+
+		type |= fence->native_type;
+		relevant = type & fence->type;
+
+		if ((fence->signaled | relevant) != fence->signaled) {
+			fence->signaled |= relevant;
+			DRM_DEBUG("Fence 0x%08lx signaled 0x%08x\n",
+				  fence->base.hash.key, fence->signaled);
+			fence->submitted_flush |= relevant;
+			wake = 1;
+		}
+
+		relevant = fence->flush_mask &
+		    ~(fence->signaled | fence->submitted_flush);
+
+		if (relevant) {
+			fm->pending_flush |= relevant;
+			fence->submitted_flush = fence->flush_mask;
+		}
+
+		if (!(fence->type & ~fence->signaled)) {
+			DRM_DEBUG("Fence completely signaled 0x%08lx\n",
+				  fence->base.hash.key);
+			list_del_init(&fence->ring);
+		}
+
+	}
+
+	if (wake) {
+		DRM_WAKEUP(&fm->fence_queue);
+	}
+}
+
+EXPORT_SYMBOL(drm_fence_handler);
+
+static void drm_fence_unring(drm_device_t * dev, struct list_head *ring)
+{
+	drm_fence_manager_t *fm = &dev->fm;
+	unsigned long flags;
+
+	write_lock_irqsave(&fm->lock, flags);
+	list_del_init(ring);
+	write_unlock_irqrestore(&fm->lock, flags);
+}
+
+void drm_fence_usage_deref_locked(drm_device_t * dev,
+				  drm_fence_object_t * fence)
+{
+	drm_fence_manager_t *fm = &dev->fm;
+
+	if (atomic_dec_and_test(&fence->usage)) {
+		drm_fence_unring(dev, &fence->ring);
+		DRM_DEBUG("Destroyed a fence object 0x%08lx\n",
+			  fence->base.hash.key);
+		atomic_dec(&fm->count);
+		drm_ctl_cache_free(drm_cache.fence_object, sizeof(*fence),
+				   fence);
+	}
+}
+
+void drm_fence_usage_deref_unlocked(drm_device_t * dev,
+				    drm_fence_object_t * fence)
+{
+	drm_fence_manager_t *fm = &dev->fm;
+
+	if (atomic_dec_and_test(&fence->usage)) {
+		mutex_lock(&dev->struct_mutex);
+		if (atomic_read(&fence->usage) == 0) {
+			drm_fence_unring(dev, &fence->ring);
+			atomic_dec(&fm->count);
+			drm_ctl_cache_free(drm_cache.fence_object,
+					   sizeof(*fence), fence);
+		}
+		mutex_unlock(&dev->struct_mutex);
+	}
+}
+
+static void drm_fence_object_destroy(drm_file_t * priv,
+				     drm_user_object_t * base)
+{
+	drm_device_t *dev = priv->head->dev;
+	drm_fence_object_t *fence =
+	    drm_user_object_entry(base, drm_fence_object_t, base);
+
+	drm_fence_usage_deref_locked(dev, fence);
+}
+
+static int fence_signaled(drm_device_t * dev, volatile
+			  drm_fence_object_t * fence,
+			  uint32_t mask, int poke_flush)
+{
+	unsigned long flags;
+	int signaled;
+	drm_fence_manager_t *fm = &dev->fm;
+	drm_fence_driver_t *driver = dev->driver->fence_driver;
+
+	if (poke_flush)
+		driver->poke_flush(dev);
+	read_lock_irqsave(&fm->lock, flags);
+	signaled =
+	    (fence->type & mask & fence->signaled) == (fence->type & mask);
+	read_unlock_irqrestore(&fm->lock, flags);
+
+	return signaled;
+}
+
+static void drm_fence_flush_exe(drm_fence_manager_t * fm,
+				drm_fence_driver_t * driver, uint32_t sequence)
+{
+	uint32_t diff;
+
+	if (!fm->pending_exe_flush) {
+		volatile struct list_head *list;
+
+		/*
+		 * Last_exe_flush is invalid. Find oldest sequence.
+		 */
+
+/*		list = fm->fence_types[_DRM_FENCE_TYPE_EXE];*/
+		list = &fm->ring;
+		if (list->next == &fm->ring) {
+			return;
+		} else {
+			drm_fence_object_t *fence =
+			    list_entry(list->next, drm_fence_object_t, ring);
+			fm->last_exe_flush = (fence->sequence - 1) &
+			    driver->sequence_mask;
+		}
+		diff = (sequence - fm->last_exe_flush) & driver->sequence_mask;
+		if (diff >= driver->wrap_diff)
+			return;
+		fm->exe_flush_sequence = sequence;
+		fm->pending_exe_flush = 1;
+	} else {
+		diff =
+		    (sequence - fm->exe_flush_sequence) & driver->sequence_mask;
+		if (diff < driver->wrap_diff) {
+			fm->exe_flush_sequence = sequence;
+		}
+	}
+}
+
+int drm_fence_object_signaled(volatile drm_fence_object_t * fence,
+			      uint32_t type)
+{
+	return ((fence->signaled & type) == type);
+}
+
+int drm_fence_object_flush(drm_device_t * dev,
+			   volatile drm_fence_object_t * fence, uint32_t type)
+{
+	drm_fence_manager_t *fm = &dev->fm;
+	drm_fence_driver_t *driver = dev->driver->fence_driver;
+	unsigned long flags;
+
+	if (type & ~fence->type) {
+		DRM_ERROR("Flush trying to extend fence type, "
+			  "0x%x, 0x%x\n", type, fence->type);
+		return -EINVAL;
+	}
+
+	write_lock_irqsave(&fm->lock, flags);
+	fence->flush_mask |= type;
+	if (fence->submitted_flush == fence->signaled) {
+		if ((fence->type & DRM_FENCE_TYPE_EXE) &&
+		    !(fence->submitted_flush & DRM_FENCE_TYPE_EXE)) {
+			drm_fence_flush_exe(fm, driver, fence->sequence);
+			fence->submitted_flush |= DRM_FENCE_TYPE_EXE;
+		} else {
+			fm->pending_flush |= (fence->flush_mask &
+					      ~fence->submitted_flush);
+			fence->submitted_flush = fence->flush_mask;
+		}
+	}
+	write_unlock_irqrestore(&fm->lock, flags);
+	driver->poke_flush(dev);
+	return 0;
+}
+
+/*
+ * Make sure old fence objects are signaled before their fence sequences are
+ * wrapped around and reused.
+ */
+
+void drm_fence_flush_old(drm_device_t * dev, uint32_t sequence)
+{
+	drm_fence_manager_t *fm = &dev->fm;
+	drm_fence_driver_t *driver = dev->driver->fence_driver;
+	uint32_t old_sequence;
+	unsigned long flags;
+	drm_fence_object_t *fence;
+	uint32_t diff;
+
+	mutex_lock(&dev->struct_mutex);
+	read_lock_irqsave(&fm->lock, flags);
+	if (fm->ring.next == &fm->ring) {
+		read_unlock_irqrestore(&fm->lock, flags);
+		mutex_unlock(&dev->struct_mutex);
+		return;
+	}
+	old_sequence = (sequence - driver->flush_diff) & driver->sequence_mask;
+	fence = list_entry(fm->ring.next, drm_fence_object_t, ring);
+	atomic_inc(&fence->usage);
+	mutex_unlock(&dev->struct_mutex);
+	diff = (old_sequence - fence->sequence) & driver->sequence_mask;
+	read_unlock_irqrestore(&fm->lock, flags);
+	if (diff < driver->wrap_diff) {
+		drm_fence_object_flush(dev, fence, fence->type);
+	}
+	drm_fence_usage_deref_unlocked(dev, fence);
+}
+
+EXPORT_SYMBOL(drm_fence_flush_old);
+
+int drm_fence_object_wait(drm_device_t * dev,
+			  volatile drm_fence_object_t * fence,
+			  int lazy, int ignore_signals, uint32_t mask)
+{
+	drm_fence_manager_t *fm = &dev->fm;
+	drm_fence_driver_t *driver = dev->driver->fence_driver;
+	int ret = 0;
+	unsigned long _end;
+	int signaled;
+
+	if (mask & ~fence->type) {
+		DRM_ERROR("Wait trying to extend fence type"
+			  " 0x%08x 0x%08x\n", mask, fence->type);
+		return -EINVAL;
+	}
+
+	if (fence_signaled(dev, fence, mask, 0))
+		return 0;
+
+	_end = jiffies + 3 * DRM_HZ;
+
+	drm_fence_object_flush(dev, fence, mask);
+
+	if (lazy && driver->lazy_capable) {
+
+		do {
+			DRM_WAIT_ON(ret, fm->fence_queue, 3 * DRM_HZ,
+				    fence_signaled(dev, fence, mask, 1));
+			if (time_after_eq(jiffies, _end))
+				break;
+		} while (ret == -EINTR && ignore_signals);
+		if (time_after_eq(jiffies, _end) && (ret != 0))
+			ret = -EBUSY;
+		if (ret) {
+			if (ret == -EBUSY) {
+				DRM_ERROR("Fence timeout. "
+					  "GPU lockup or fence driver was "
+					  "taken down.\n");
+			}
+			return ((ret == -EINTR) ? -EAGAIN : ret);
+		}
+	} else if ((fence->class == 0) && (mask & DRM_FENCE_TYPE_EXE) &&
+		   driver->lazy_capable) {
+
+		/*
+		 * We use an IRQ wait for the EXE fence if available, to
+		 * save CPU in some cases.
+		 */
+
+		do {
+			DRM_WAIT_ON(ret, fm->fence_queue, 3 * DRM_HZ,
+				    fence_signaled(dev, fence,
+						   DRM_FENCE_TYPE_EXE, 1));
+			if (time_after_eq(jiffies, _end))
+				break;
+		} while (ret == -EINTR && ignore_signals);
+		if (time_after_eq(jiffies, _end) && (ret != 0))
+			ret = -EBUSY;
+		if (ret)
+			return ((ret == -EINTR) ? -EAGAIN : ret);
+	}
+
+	if (fence_signaled(dev, fence, mask, 0))
+		return 0;
+
+	/*
+	 * Avoid kernel-space busy-waits.
+	 */
+#if 1
+	if (!ignore_signals)
+		return -EAGAIN;
+#endif
+	do {
+		schedule();
+		signaled = fence_signaled(dev, fence, mask, 1);
+	} while (!signaled && !time_after_eq(jiffies, _end));
+
+	if (!signaled)
+		return -EBUSY;
+
+	return 0;
+}
+
+int drm_fence_object_emit(drm_device_t * dev, drm_fence_object_t * fence,
+			  uint32_t fence_flags, uint32_t type)
+{
+	drm_fence_manager_t *fm = &dev->fm;
+	drm_fence_driver_t *driver = dev->driver->fence_driver;
+	unsigned long flags;
+	uint32_t sequence;
+	uint32_t native_type;
+	int ret;
+
+	drm_fence_unring(dev, &fence->ring);
+	ret = driver->emit(dev, fence_flags, &sequence, &native_type);
+	if (ret)
+		return ret;
+
+	write_lock_irqsave(&fm->lock, flags);
+	fence->type = type;
+	fence->flush_mask = 0x00;
+	fence->submitted_flush = 0x00;
+	fence->signaled = 0x00;
+	fence->sequence = sequence;
+	fence->native_type = native_type;
+	list_add_tail(&fence->ring, &fm->ring);
+	write_unlock_irqrestore(&fm->lock, flags);
+	return 0;
+}
+
+static int drm_fence_object_init(drm_device_t * dev, uint32_t type,
+				 uint32_t fence_flags,
+				 drm_fence_object_t * fence)
+{
+	int ret = 0;
+	unsigned long flags;
+	drm_fence_manager_t *fm = &dev->fm;
+
+	mutex_lock(&dev->struct_mutex);
+	atomic_set(&fence->usage, 1);
+	mutex_unlock(&dev->struct_mutex);
+
+	write_lock_irqsave(&fm->lock, flags);
+	INIT_LIST_HEAD(&fence->ring);
+	fence->class = 0;
+	fence->type = type;
+	fence->flush_mask = 0;
+	fence->submitted_flush = 0;
+	fence->signaled = 0;
+	fence->sequence = 0;
+	write_unlock_irqrestore(&fm->lock, flags);
+	if (fence_flags & DRM_FENCE_FLAG_EMIT) {
+		ret = drm_fence_object_emit(dev, fence, fence_flags, type);
+	}
+	return ret;
+}
+
+int drm_fence_add_user_object(drm_file_t * priv, drm_fence_object_t * fence,
+			      int shareable)
+{
+	drm_device_t *dev = priv->head->dev;
+	int ret;
+
+	mutex_lock(&dev->struct_mutex);
+	ret = drm_add_user_object(priv, &fence->base, shareable);
+	mutex_unlock(&dev->struct_mutex);
+	if (ret)
+		return ret;
+	fence->base.type = drm_fence_type;
+	fence->base.remove = &drm_fence_object_destroy;
+	DRM_DEBUG("Fence 0x%08lx created\n", fence->base.hash.key);
+	return 0;
+}
+
+EXPORT_SYMBOL(drm_fence_add_user_object);
+
+int drm_fence_object_create(drm_device_t * dev, uint32_t type,
+			    unsigned flags, drm_fence_object_t ** c_fence)
+{
+	drm_fence_object_t *fence;
+	int ret;
+	drm_fence_manager_t *fm = &dev->fm;
+
+	fence = drm_ctl_cache_alloc(drm_cache.fence_object,
+				    sizeof(*fence), GFP_KERNEL);
+	if (!fence)
+		return -ENOMEM;
+	ret = drm_fence_object_init(dev, type, flags, fence);
+	if (ret) {
+		drm_fence_usage_deref_unlocked(dev, fence);
+		return ret;
+	}
+	*c_fence = fence;
+	atomic_inc(&fm->count);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(drm_fence_object_create);
+
+void drm_fence_manager_init(drm_device_t * dev)
+{
+	drm_fence_manager_t *fm = &dev->fm;
+	drm_fence_driver_t *fed = dev->driver->fence_driver;
+	int i;
+
+	fm->lock = RW_LOCK_UNLOCKED;
+	write_lock(&fm->lock);
+	INIT_LIST_HEAD(&fm->ring);
+	fm->pending_flush = 0;
+	DRM_INIT_WAITQUEUE(&fm->fence_queue);
+	fm->initialized = 0;
+	if (fed) {
+		fm->initialized = 1;
+		atomic_set(&fm->count, 0);
+		for (i = 0; i < fed->no_types; ++i) {
+			fm->fence_types[i] = &fm->ring;
+		}
+	}
+	write_unlock(&fm->lock);
+}
+
+void drm_fence_manager_takedown(drm_device_t * dev)
+{
+}
+
+drm_fence_object_t *drm_lookup_fence_object(drm_file_t * priv, uint32_t handle)
+{
+	drm_device_t *dev = priv->head->dev;
+	drm_user_object_t *uo;
+	drm_fence_object_t *fence;
+
+	mutex_lock(&dev->struct_mutex);
+	uo = drm_lookup_user_object(priv, handle);
+	if (!uo || (uo->type != drm_fence_type)) {
+		mutex_unlock(&dev->struct_mutex);
+		return NULL;
+	}
+	fence = drm_user_object_entry(uo, drm_fence_object_t, base);
+	atomic_inc(&fence->usage);
+	mutex_unlock(&dev->struct_mutex);
+	return fence;
+}
+
+int drm_fence_ioctl(DRM_IOCTL_ARGS)
+{
+	DRM_DEVICE;
+	int ret;
+	drm_fence_manager_t *fm = &dev->fm;
+	drm_fence_arg_t arg;
+	drm_fence_object_t *fence;
+	drm_user_object_t *uo;
+	unsigned long flags;
+	ret = 0;
+
+	if (!fm->initialized) {
+		DRM_ERROR("The DRM driver does not support fencing.\n");
+		return -EINVAL;
+	}
+
+	DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
+	switch (arg.op) {
+	case drm_fence_create:
+		if (arg.flags & DRM_FENCE_FLAG_EMIT)
+			LOCK_TEST_WITH_RETURN(dev, filp);
+		ret = drm_fence_object_create(dev, arg.type, arg.flags, &fence);
+		if (ret)
+			return ret;
+		ret = drm_fence_add_user_object(priv, fence,
+						arg.flags &
+						DRM_FENCE_FLAG_SHAREABLE);
+		if (ret) {
+			drm_fence_usage_deref_unlocked(dev, fence);
+			return ret;
+		}
+
+		/*
+		 * usage > 0. No need to lock dev->struct_mutex.
+		 */
+
+		atomic_inc(&fence->usage);
+		arg.handle = fence->base.hash.key;
+		break;
+	case drm_fence_destroy:
+		mutex_lock(&dev->struct_mutex);
+		uo = drm_lookup_user_object(priv, arg.handle);
+		if (!uo || (uo->type != drm_fence_type) || uo->owner != priv) {
+			mutex_unlock(&dev->struct_mutex);
+			return -EINVAL;
+		}
+		ret = drm_remove_user_object(priv, uo);
+		mutex_unlock(&dev->struct_mutex);
+		return ret;
+	case drm_fence_reference:
+		ret =
+		    drm_user_object_ref(priv, arg.handle, drm_fence_type, &uo);
+		if (ret)
+			return ret;
+		fence = drm_lookup_fence_object(priv, arg.handle);
+		break;
+	case drm_fence_unreference:
+		ret = drm_user_object_unref(priv, arg.handle, drm_fence_type);
+		return ret;
+	case drm_fence_signaled:
+		fence = drm_lookup_fence_object(priv, arg.handle);
+		if (!fence)
+			return -EINVAL;
+		break;
+	case drm_fence_flush:
+		fence = drm_lookup_fence_object(priv, arg.handle);
+		if (!fence)
+			return -EINVAL;
+		ret = drm_fence_object_flush(dev, fence, arg.type);
+		break;
+	case drm_fence_wait:
+		fence = drm_lookup_fence_object(priv, arg.handle);
+		if (!fence)
+			return -EINVAL;
+		ret =
+		    drm_fence_object_wait(dev, fence,
+					  arg.flags & DRM_FENCE_FLAG_WAIT_LAZY,
+					  0, arg.type);
+		break;
+	case drm_fence_emit:
+		LOCK_TEST_WITH_RETURN(dev, filp);
+		fence = drm_lookup_fence_object(priv, arg.handle);
+		if (!fence)
+			return -EINVAL;
+		ret = drm_fence_object_emit(dev, fence, arg.flags, arg.type);
+		break;
+	case drm_fence_buffers:
+		if (!dev->bm.initialized) {
+			DRM_ERROR("Buffer object manager is not initialized\n");
+			return -EINVAL;
+		}
+		LOCK_TEST_WITH_RETURN(dev, filp);
+		ret = drm_fence_buffer_objects(priv, NULL, arg.flags,
+					       NULL, &fence);
+		if (ret)
+			return ret;
+		ret = drm_fence_add_user_object(priv, fence,
+						arg.flags &
+						DRM_FENCE_FLAG_SHAREABLE);
+		if (ret)
+			return ret;
+		atomic_inc(&fence->usage);
+		arg.handle = fence->base.hash.key;
+		break;
+	default:
+		return -EINVAL;
+	}
+	read_lock_irqsave(&fm->lock, flags);
+	arg.class = fence->class;
+	arg.type = fence->type;
+	arg.signaled = fence->signaled;
+	read_unlock_irqrestore(&fm->lock, flags);
+	drm_fence_usage_deref_unlocked(dev, fence);
+
+	DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
+	return ret;
+}
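For context, drm_fence_handler() above is meant to be fed completed sequence numbers from the driver's interrupt path. A minimal sketch of that hand-off, assuming a hypothetical breadcrumb read helper (not part of this patch) and a driver that only signals the EXE type:

	/* Illustrative driver IRQ path; example_read_breadcrumb() is assumed. */
	static void example_fence_irq(drm_device_t *dev)
	{
		uint32_t sequence = example_read_breadcrumb(dev);

		drm_fence_handler(dev, sequence, DRM_FENCE_TYPE_EXE);
	}
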
diff --git a/linux-core/drm_fops.c b/linux-core/drm_fops.c
index 48c7754..b60ced3 100644
--- a/linux-core/drm_fops.c
+++ b/linux-core/drm_fops.c
@@ -47,6 +47,7 @@
 	int i;
 	int ret;
 
+
 	if (dev->driver->firstopen) {
 		ret = dev->driver->firstopen(dev);
 		if (ret != 0)
@@ -56,6 +57,7 @@
 	dev->magicfree.next = NULL;
 
 	/* prebuild the SAREA */
+
 	i = drm_addmap(dev, 0, SAREA_MAX, _DRM_SHM, _DRM_CONTAINS_LOCK, &map);
 	if (i != 0)
 		return i;
@@ -156,6 +158,12 @@
 		}
 		spin_unlock(&dev->count_lock);
 	}
+	mutex_lock(&dev->struct_mutex);
+	BUG_ON((dev->dev_mapping != NULL) && 
+	       (dev->dev_mapping != inode->i_mapping));
+	if (dev->dev_mapping == NULL)
+		dev->dev_mapping = inode->i_mapping;
+	mutex_unlock(&dev->struct_mutex);
 
 	return retcode;
 }
@@ -233,6 +241,7 @@
 	int minor = iminor(inode);
 	drm_file_t *priv;
 	int ret;
+	int i,j;
 
 	if (filp->f_flags & O_EXCL)
 		return -EBUSY;	/* No exclusive opens */
@@ -256,6 +265,22 @@
 	priv->authenticated = capable(CAP_SYS_ADMIN);
 	priv->lock_count = 0;
 
+	INIT_LIST_HEAD(&priv->user_objects);
+	INIT_LIST_HEAD(&priv->refd_objects);
+
+	for (i=0; i<_DRM_NO_REF_TYPES; ++i) {
+		ret = drm_ht_create(&priv->refd_object_hash[i], DRM_FILE_HASH_ORDER);
+		if (ret)
+			break;
+	}
+
+	if (ret) {
+		for(j=0; j<i; ++j) {
+			drm_ht_remove(&priv->refd_object_hash[j]);
+		}
+		goto out_free;
+	}
+
 	if (dev->driver->open) {
 		ret = dev->driver->open(dev, priv);
 		if (ret < 0)
@@ -320,6 +345,53 @@
 }
 EXPORT_SYMBOL(drm_fasync);
 
+static void drm_object_release(struct file *filp) {
+
+        drm_file_t *priv = filp->private_data;
+	struct list_head *head;
+	drm_user_object_t *user_object;
+	drm_ref_object_t *ref_object;
+	int i;
+
+	/*
+	 * Free leftover ref objects created by this client. Note that we cannot
+	 * use list_for_each() here, as the struct_mutex may be temporarily
+	 * released by the remove_() functions, and thus the lists may be altered.
+	 * Also, a drm_remove_ref_object() will not remove the object
+	 * from the list unless its refcount is 1.
+	 */
+
+	head = &priv->refd_objects; 
+	while (head->next != head) {
+		ref_object = list_entry(head->next, drm_ref_object_t, list);
+		drm_remove_ref_object(priv, ref_object);		
+		head = &priv->refd_objects; 
+	}
+		
+	/*
+	 * Free leftover user objects created by this client.
+	 */
+
+	head = &priv->user_objects; 
+	while (head->next != head) {
+		user_object = list_entry(head->next, drm_user_object_t, list);
+		drm_remove_user_object(priv, user_object);		
+		head = &priv->user_objects; 
+	}
+
+
+
+
+	for(i=0; i<_DRM_NO_REF_TYPES; ++i) {
+		drm_ht_remove(&priv->refd_object_hash[i]);
+	}
+}			
+		
+
+
+
+
+
 /**
  * Release file.
  *
@@ -354,58 +426,43 @@
 		  current->pid, (long)old_encode_dev(priv->head->device),
 		  dev->open_count);
 
-	if (priv->lock_count && dev->lock.hw_lock &&
-	    _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) &&
-	    dev->lock.filp == filp) {
+	if (dev->driver->reclaim_buffers_locked) {
+	        unsigned long _end = jiffies + DRM_HZ*3;
+
+		do {
+			retcode = drm_kernel_take_hw_lock(filp);
+		} while(retcode && !time_after_eq(jiffies,_end));
+
+		if (!retcode) {
+			dev->driver->reclaim_buffers_locked(dev, filp);
+
+			drm_lock_free(dev, &dev->lock.hw_lock->lock,
+				      _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
+		} else {
+
+			/*
+			 * FIXME: This is not a good solution. We should perhaps associate the
+			 * DRM lock with a process context, and check whether the current process
+			 * holds the lock. Then we can run reclaim buffers locked anyway.
+			 */
+
+			DRM_ERROR("Reclaim buffers locked deadlock.\n");
+			DRM_ERROR("This is probably a single thread with multiple\n");
+			DRM_ERROR("DRM file descriptors open, either dying or "
+				  "closing them\n");
+			DRM_ERROR("while holding the lock. Buffers will not be reclaimed.\n");
+			DRM_ERROR("Locking context is 0x%08x\n",
+				  _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
+		}
+	} else if (drm_i_have_hw_lock(filp)) {
 		DRM_DEBUG("File %p released, freeing lock for context %d\n",
 			  filp, _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
 
-		if (dev->driver->reclaim_buffers_locked)
-			dev->driver->reclaim_buffers_locked(dev, filp);
-
 		drm_lock_free(dev, &dev->lock.hw_lock->lock,
 			      _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
-
-		/* FIXME: may require heavy-handed reset of
-		   hardware at this point, possibly
-		   processed via a callback to the X
-		   server. */
-	} else if (dev->driver->reclaim_buffers_locked && priv->lock_count
-		   && dev->lock.hw_lock) {
-		/* The lock is required to reclaim buffers */
-		DECLARE_WAITQUEUE(entry, current);
-
-		add_wait_queue(&dev->lock.lock_queue, &entry);
-		for (;;) {
-			__set_current_state(TASK_INTERRUPTIBLE);
-			if (!dev->lock.hw_lock) {
-				/* Device has been unregistered */
-				retcode = -EINTR;
-				break;
-			}
-			if (drm_lock_take(&dev->lock.hw_lock->lock,
-					  DRM_KERNEL_CONTEXT)) {
-				dev->lock.filp = filp;
-				dev->lock.lock_time = jiffies;
-				atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
-				break;	/* Got lock */
-			}
-			/* Contention */
-			schedule();
-			if (signal_pending(current)) {
-				retcode = -ERESTARTSYS;
-				break;
-			}
-		}
-		__set_current_state(TASK_RUNNING);
-		remove_wait_queue(&dev->lock.lock_queue, &entry);
-		if (!retcode) {
-			dev->driver->reclaim_buffers_locked(dev, filp);
-			drm_lock_free(dev, &dev->lock.hw_lock->lock,
-				      DRM_KERNEL_CONTEXT);
-		}
 	}
 
+
 	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
 	    !dev->driver->reclaim_buffers_locked) {
 		dev->driver->reclaim_buffers(dev, filp);
@@ -414,6 +471,7 @@
 	drm_fasync(-1, filp, 0);
 
 	mutex_lock(&dev->ctxlist_mutex);
+
 	if (dev->ctxlist && (!list_empty(&dev->ctxlist->head))) {
 		drm_ctx_list_t *pos, *n;
 
@@ -435,6 +493,7 @@
 	mutex_unlock(&dev->ctxlist_mutex);
 
 	mutex_lock(&dev->struct_mutex);
+	drm_object_release(filp);
 	if (priv->remove_auth_on_close == 1) {
 		drm_file_t *temp = dev->file_first;
 		while (temp) {
diff --git a/linux-core/drm_hashtab.c b/linux-core/drm_hashtab.c
index a0b2d68..6f17e11 100644
--- a/linux-core/drm_hashtab.c
+++ b/linux-core/drm_hashtab.c
@@ -36,25 +36,34 @@
 #include "drm_hashtab.h"
 #include <linux/hash.h>
 
-int drm_ht_create(drm_open_hash_t *ht, unsigned int order)
+int drm_ht_create(drm_open_hash_t * ht, unsigned int order)
 {
 	unsigned int i;
 
 	ht->size = 1 << order;
 	ht->order = order;
 	ht->fill = 0;
-	ht->table = vmalloc(ht->size*sizeof(*ht->table));
+	ht->table = NULL;
+	ht->use_vmalloc = ((ht->size * sizeof(*ht->table)) > PAGE_SIZE);
+	if (!ht->use_vmalloc) {
+		ht->table = drm_calloc(ht->size, sizeof(*ht->table),
+				       DRM_MEM_HASHTAB);
+	}
+	if (!ht->table) {
+		ht->use_vmalloc = 1;
+		ht->table = vmalloc(ht->size * sizeof(*ht->table));
+	}
 	if (!ht->table) {
 		DRM_ERROR("Out of memory for hash table\n");
 		return -ENOMEM;
 	}
-	for (i=0; i< ht->size; ++i) {
+	for (i = 0; i < ht->size; ++i) {
 		INIT_HLIST_HEAD(&ht->table[i]);
 	}
 	return 0;
 }
 
-void drm_ht_verbose_list(drm_open_hash_t *ht, unsigned long key)
+void drm_ht_verbose_list(drm_open_hash_t * ht, unsigned long key)
 {
 	drm_hash_item_t *entry;
 	struct hlist_head *h_list;
@@ -71,7 +80,7 @@
 	}
 }
 
-static struct hlist_node *drm_ht_find_key(drm_open_hash_t *ht, 
+static struct hlist_node *drm_ht_find_key(drm_open_hash_t * ht,
 					  unsigned long key)
 {
 	drm_hash_item_t *entry;
@@ -91,8 +100,7 @@
 	return NULL;
 }
 
-
-int drm_ht_insert_item(drm_open_hash_t *ht, drm_hash_item_t *item)
+int drm_ht_insert_item(drm_open_hash_t * ht, drm_hash_item_t * item)
 {
 	drm_hash_item_t *entry;
 	struct hlist_head *h_list;
@@ -123,7 +131,7 @@
  * Just insert an item and return any "bits" bit key that hasn't been 
  * used before.
  */
-int drm_ht_just_insert_please(drm_open_hash_t *ht, drm_hash_item_t *item,
+int drm_ht_just_insert_please(drm_open_hash_t * ht, drm_hash_item_t * item,
 			      unsigned long seed, int bits, int shift,
 			      unsigned long add)
 {
@@ -138,7 +146,7 @@
 		ret = drm_ht_insert_item(ht, item);
 		if (ret)
 			unshifted_key = (unshifted_key + 1) & mask;
-	} while(ret && (unshifted_key != first));
+	} while (ret && (unshifted_key != first));
 
 	if (ret) {
 		DRM_ERROR("Available key bit space exhausted\n");
@@ -147,8 +155,8 @@
 	return 0;
 }
 
-int drm_ht_find_item(drm_open_hash_t *ht, unsigned long key,
-		     drm_hash_item_t **item)
+int drm_ht_find_item(drm_open_hash_t * ht, unsigned long key,
+		     drm_hash_item_t ** item)
 {
 	struct hlist_node *list;
 
@@ -160,7 +168,7 @@
 	return 0;
 }
 
-int drm_ht_remove_key(drm_open_hash_t *ht, unsigned long key)
+int drm_ht_remove_key(drm_open_hash_t * ht, unsigned long key)
 {
 	struct hlist_node *list;
 
@@ -173,18 +181,21 @@
 	return -EINVAL;
 }
 
-int drm_ht_remove_item(drm_open_hash_t *ht, drm_hash_item_t *item)
+int drm_ht_remove_item(drm_open_hash_t * ht, drm_hash_item_t * item)
 {
 	hlist_del_init(&item->head);
 	ht->fill--;
 	return 0;
 }
 
-void drm_ht_remove(drm_open_hash_t *ht)
+void drm_ht_remove(drm_open_hash_t * ht)
 {
 	if (ht->table) {
-		vfree(ht->table);
+		if (ht->use_vmalloc)
+			vfree(ht->table);
+		else
+			drm_free(ht->table, ht->size * sizeof(*ht->table),
+				 DRM_MEM_HASHTAB);
 		ht->table = NULL;
 	}
 }
-
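As a usage note, the open hash table stores intrusive drm_hash_item_t members and returns the containing structure via drm_hash_entry(). A minimal sketch with a made-up wrapper type, error handling omitted:

	/* Illustrative only; example_obj_t is not part of this patch. */
	typedef struct {
		drm_hash_item_t hash;
		int payload;
	} example_obj_t;

	drm_open_hash_t ht;
	drm_hash_item_t *found;
	example_obj_t obj = { .hash = { .key = 42 }, .payload = 0 };

	drm_ht_create(&ht, DRM_FILE_HASH_ORDER);
	drm_ht_insert_item(&ht, &obj.hash);
	if (!drm_ht_find_item(&ht, 42, &found))
		drm_hash_entry(found, example_obj_t, hash)->payload = 1;
	drm_ht_remove_item(&ht, &obj.hash);
	drm_ht_remove(&ht);
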
diff --git a/linux-core/drm_hashtab.h b/linux-core/drm_hashtab.h
index 40afec0..613091c 100644
--- a/linux-core/drm_hashtab.h
+++ b/linux-core/drm_hashtab.h
@@ -47,6 +47,7 @@
 	unsigned int order;
 	unsigned int fill;
 	struct hlist_head *table;
+	int use_vmalloc;
 } drm_open_hash_t;
 
 
diff --git a/linux-core/drm_irq.c b/linux-core/drm_irq.c
index 4d8e4a2..c365c08 100644
--- a/linux-core/drm_irq.c
+++ b/linux-core/drm_irq.c
@@ -118,6 +118,7 @@
 		init_waitqueue_head(&dev->vbl_queue);
 
 		spin_lock_init(&dev->vbl_lock);
+		spin_lock_init(&dev->tasklet_lock);
 
 		INIT_LIST_HEAD(&dev->vbl_sigs.head);
 		INIT_LIST_HEAD(&dev->vbl_sigs2.head);
diff --git a/linux-core/drm_lock.c b/linux-core/drm_lock.c
index d1b85a1..d11c570 100644
--- a/linux-core/drm_lock.c
+++ b/linux-core/drm_lock.c
@@ -35,9 +35,12 @@
 
 #include "drmP.h"
 
+#if 0
 static int drm_lock_transfer(drm_device_t * dev,
 			     __volatile__ unsigned int *lock,
 			     unsigned int context);
+#endif
+
 static int drm_notifier(void *priv);
 
 /**
@@ -181,12 +184,9 @@
 	if (dev->driver->kernel_context_switch_unlock)
 		dev->driver->kernel_context_switch_unlock(dev);
 	else {
-		drm_lock_transfer(dev, &dev->lock.hw_lock->lock,
-				  DRM_KERNEL_CONTEXT);
-
 		if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
-				  DRM_KERNEL_CONTEXT)) {
-			DRM_ERROR("\n");
+				  lock.context)) {
+			/* FIXME: Should really bail out here. */
 		}
 	}
 
@@ -212,7 +212,7 @@
 		if (old & _DRM_LOCK_HELD)
 			new = old | _DRM_LOCK_CONT;
 		else
-			new = context | _DRM_LOCK_HELD;
+			new = context | _DRM_LOCK_HELD | _DRM_LOCK_CONT;
 		prev = cmpxchg(lock, old, new);
 	} while (prev != old);
 	if (_DRM_LOCKING_CONTEXT(old) == context) {
@@ -224,13 +224,14 @@
 			return 0;
 		}
 	}
-	if (new == (context | _DRM_LOCK_HELD)) {
+	if (new == (context | _DRM_LOCK_HELD | _DRM_LOCK_CONT)) {
 		/* Have lock */
 		return 1;
 	}
 	return 0;
 }
 
+#if 0
 /**
  * This takes a lock forcibly and hands it to context.	Should ONLY be used
  * inside *_unlock to give lock to kernel before calling *_dma_schedule.
@@ -257,6 +258,7 @@
 	} while (prev != old);
 	return 1;
 }
+#endif
 
 /**
  * Free lock.
@@ -274,12 +276,12 @@
 {
 	unsigned int old, new, prev;
 
-	dev->lock.filp = NULL;
 	do {
 		old = *lock;
-		new = 0;
+		new = _DRM_LOCKING_CONTEXT(old);
 		prev = cmpxchg(lock, old, new);
 	} while (prev != old);
+
 	if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) {
 		DRM_ERROR("%d freed heavyweight lock held by %d\n",
 			  context, _DRM_LOCKING_CONTEXT(old));
@@ -319,3 +321,66 @@
 	} while (prev != old);
 	return 0;
 }
+
+/*
+ * Can be used by drivers to check for, and if necessary take, the
+ * hardware lock (e.g. when waiting for idle before reclaiming buffers).
+ */
+
+int drm_i_have_hw_lock(struct file *filp)
+{
+	DRM_DEVICE;
+
+	return (priv->lock_count && dev->lock.hw_lock &&
+		_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) &&
+		dev->lock.filp == filp);
+}
+
+EXPORT_SYMBOL(drm_i_have_hw_lock);
+
+int drm_kernel_take_hw_lock(struct file *filp)
+{
+	DRM_DEVICE;
+
+	int ret = 0; 
+	unsigned long _end = jiffies + 3*DRM_HZ;
+	
+	if (!drm_i_have_hw_lock(filp)) {
+	
+		DECLARE_WAITQUEUE(entry, current);
+
+		add_wait_queue(&dev->lock.lock_queue, &entry);
+		for (;;) {
+			__set_current_state(TASK_INTERRUPTIBLE);
+			if (!dev->lock.hw_lock) {
+				/* Device has been unregistered */
+				ret = -EINTR;
+				break;
+			}
+			if (drm_lock_take(&dev->lock.hw_lock->lock,
+					  DRM_KERNEL_CONTEXT)) {
+				dev->lock.filp = filp;
+				dev->lock.lock_time = jiffies;
+				atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
+				break;	/* Got lock */
+			}
+			/* Contention */
+			if (time_after_eq(jiffies,_end)) {
+			        ret = -EBUSY;
+				break;
+			}
+
+			schedule_timeout(1);
+			if (signal_pending(current)) {
+				ret = -ERESTARTSYS;
+				break;
+			}
+		}
+		__set_current_state(TASK_RUNNING);
+		remove_wait_queue(&dev->lock.lock_queue, &entry);
+	}
+	return ret;
+}
+
+EXPORT_SYMBOL(drm_kernel_take_hw_lock);
+
diff --git a/linux-core/drm_memory.c b/linux-core/drm_memory.c
index 9125cd4..3370c27 100644
--- a/linux-core/drm_memory.c
+++ b/linux-core/drm_memory.c
@@ -33,10 +33,78 @@
  * OTHER DEALINGS IN THE SOFTWARE.
  */
 
-#include <linux/config.h>
 #include <linux/highmem.h>
 #include "drmP.h"
 
+static struct {
+	spinlock_t lock;
+	drm_u64_t cur_used;
+	drm_u64_t low_threshold;
+	drm_u64_t high_threshold;
+} drm_memctl = {
+	.lock = SPIN_LOCK_UNLOCKED
+};
+
+static inline size_t drm_size_align(size_t size) {
+
+	register size_t tmpSize = 4;
+	if (size > PAGE_SIZE)
+		return PAGE_ALIGN(size);
+
+	while(tmpSize < size)
+		tmpSize <<= 1;
+
+	return (size_t) tmpSize;
+}
+
+int drm_alloc_memctl(size_t size)
+{
+	int ret;
+	unsigned long a_size = drm_size_align(size);
+ 
+	spin_lock(&drm_memctl.lock);
+	ret = ((drm_memctl.cur_used + a_size) > drm_memctl.high_threshold) ? 
+		-ENOMEM : 0;
+	if (!ret) 
+		drm_memctl.cur_used += a_size;
+	spin_unlock(&drm_memctl.lock);
+	return ret;
+}
+EXPORT_SYMBOL(drm_alloc_memctl);
+	
+void drm_free_memctl(size_t size)
+{
+	unsigned long a_size = drm_size_align(size);
+
+	spin_lock(&drm_memctl.lock);
+	drm_memctl.cur_used -= a_size;
+	spin_unlock(&drm_memctl.lock);
+}
+EXPORT_SYMBOL(drm_free_memctl);
+
+void drm_query_memctl(drm_u64_t *cur_used,
+		      drm_u64_t *low_threshold,
+		      drm_u64_t *high_threshold) 
+{
+	spin_lock(&drm_memctl.lock);
+	*cur_used = drm_memctl.cur_used;
+	*low_threshold = drm_memctl.low_threshold;
+	*high_threshold = drm_memctl.high_threshold;
+	spin_unlock(&drm_memctl.lock);
+}	
+EXPORT_SYMBOL(drm_query_memctl);
+
+void drm_init_memctl(size_t p_low_threshold,
+		     size_t p_high_threshold)
+{
+	spin_lock(&drm_memctl.lock);
+	drm_memctl.cur_used = 0;
+	drm_memctl.low_threshold = p_low_threshold << PAGE_SHIFT;
+	drm_memctl.high_threshold = p_high_threshold << PAGE_SHIFT;
+	spin_unlock(&drm_memctl.lock);
+}
+
+
 #ifndef DEBUG_MEMORY
 
 /** No-op. */
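The accounting discipline for the new memctl interface is: reserve with drm_alloc_memctl() before allocating, and call drm_free_memctl() both when the allocation fails and when the memory is eventually freed, mirroring the nopage handler earlier in this patch. A minimal sketch of the pairing (the helper name is made up):

	/* Illustrative only: pair memctl accounting with an ordinary allocation. */
	static void *example_ctl_alloc(size_t size)
	{
		void *buf;

		if (drm_alloc_memctl(size))
			return NULL;		/* over the high threshold */

		buf = kmalloc(size, GFP_KERNEL);
		if (!buf)
			drm_free_memctl(size);	/* undo the reservation */
		return buf;
	}
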
diff --git a/linux-core/drm_memory.h b/linux-core/drm_memory.h
index 4a4fd5c..4a2c358 100644
--- a/linux-core/drm_memory.h
+++ b/linux-core/drm_memory.h
@@ -33,7 +33,6 @@
  * OTHER DEALINGS IN THE SOFTWARE.
  */
 
-#include <linux/config.h>
 #include <linux/highmem.h>
 #include <linux/vmalloc.h>
 #include "drmP.h"
diff --git a/linux-core/drm_memory_debug.c b/linux-core/drm_memory_debug.c
index 2fe7aea..aa1b292 100644
--- a/linux-core/drm_memory_debug.c
+++ b/linux-core/drm_memory_debug.c
@@ -31,7 +31,6 @@
  * OTHER DEALINGS IN THE SOFTWARE.
  */
 
-#include <linux/config.h>
 #include "drmP.h"
 
 #ifdef DEBUG_MEMORY
diff --git a/linux-core/drm_memory_debug.h b/linux-core/drm_memory_debug.h
index 706b752..1e0a63b 100644
--- a/linux-core/drm_memory_debug.h
+++ b/linux-core/drm_memory_debug.h
@@ -31,7 +31,6 @@
  * OTHER DEALINGS IN THE SOFTWARE.
  */
 
-#include <linux/config.h>
 #include "drmP.h"
 
 typedef struct drm_mem_stats {
diff --git a/linux-core/drm_mm.c b/linux-core/drm_mm.c
index 617526b..a5566b2 100644
--- a/linux-core/drm_mm.c
+++ b/linux-core/drm_mm.c
@@ -42,36 +42,137 @@
  */
 
 #include "drmP.h"
+#include <linux/slab.h>
+
+unsigned long drm_mm_tail_space(drm_mm_t *mm)
+{
+	struct list_head *tail_node;
+	drm_mm_node_t *entry;
+
+	tail_node = mm->root_node.ml_entry.prev;
+	entry = list_entry(tail_node, drm_mm_node_t, ml_entry);
+	if (!entry->free)
+		return 0;
+	
+	return entry->size;
+}
+
+int drm_mm_remove_space_from_tail(drm_mm_t *mm, unsigned long size)
+{
+	struct list_head *tail_node;
+	drm_mm_node_t *entry;
+
+	tail_node = mm->root_node.ml_entry.prev;
+	entry = list_entry(tail_node, drm_mm_node_t, ml_entry);
+	if (!entry->free)
+		return -ENOMEM;
+	
+	if (entry->size <= size)
+		return -ENOMEM;
+
+	entry->size -= size;
+	return 0;
+}
+
+
+static int drm_mm_create_tail_node(drm_mm_t *mm,
+			    unsigned long start,
+			    unsigned long size)
+{
+	drm_mm_node_t *child;
+	
+	child = (drm_mm_node_t *)
+		drm_ctl_cache_alloc(drm_cache.mm, sizeof(*child),
+				    GFP_KERNEL);
+	if (!child)
+		return -ENOMEM;
+
+	child->free = 1;
+	child->size = size;
+	child->start = start;
+	child->mm = mm;
+
+	list_add_tail(&child->ml_entry, &mm->root_node.ml_entry);
+	list_add_tail(&child->fl_entry, &mm->root_node.fl_entry);
+
+	return 0;
+}
+	
+
+int drm_mm_add_space_to_tail(drm_mm_t *mm, unsigned long size)
+{
+	struct list_head *tail_node;
+	drm_mm_node_t *entry;
+
+	tail_node = mm->root_node.ml_entry.prev;
+	entry = list_entry(tail_node, drm_mm_node_t, ml_entry);
+	if (!entry->free) {
+		return drm_mm_create_tail_node(mm, entry->start + entry->size, size);
+	}
+	entry->size += size;
+	return 0;
+}
+	
+static drm_mm_node_t *drm_mm_split_at_start(drm_mm_node_t *parent,
+					    unsigned long size)
+{
+	drm_mm_node_t *child;
+	
+	child = (drm_mm_node_t *)
+		drm_ctl_cache_alloc(drm_cache.mm, sizeof(*child),
+				    GFP_KERNEL);
+	if (!child)
+		return NULL;
+
+	INIT_LIST_HEAD(&child->fl_entry);
+
+	child->free = 0;
+	child->size = size;
+	child->start = parent->start;
+	child->mm = parent->mm;
+
+	list_add_tail(&child->ml_entry, &parent->ml_entry);
+	INIT_LIST_HEAD(&child->fl_entry);
+
+	parent->size -= size;
+	parent->start += size;
+	return child;
+}
+	
+	
 
 drm_mm_node_t *drm_mm_get_block(drm_mm_node_t * parent,
 				unsigned long size, unsigned alignment)
 {
 
+	drm_mm_node_t *align_splitoff = NULL;
 	drm_mm_node_t *child;
+	unsigned tmp = 0;
 
 	if (alignment)
-		size += alignment - 1;
-
+		tmp = size % alignment;
+	
+	if (tmp) {
+		align_splitoff = drm_mm_split_at_start(parent, alignment - tmp);
+		if (!align_splitoff)
+			return NULL;
+	}
+		
 	if (parent->size == size) {
 		list_del_init(&parent->fl_entry);
 		parent->free = 0;
 		return parent;
 	} else {
-		child = (drm_mm_node_t *) drm_alloc(sizeof(*child), DRM_MEM_MM);
-		if (!child)
+		child = drm_mm_split_at_start(parent, size);
+		if (!child) {
+			if (align_splitoff) 
+				drm_mm_put_block(align_splitoff);
 			return NULL;
-
-		INIT_LIST_HEAD(&child->ml_entry);
-		INIT_LIST_HEAD(&child->fl_entry);
-
-		child->free = 0;
-		child->size = size;
-		child->start = parent->start;
-
-		list_add_tail(&child->ml_entry, &parent->ml_entry);
-		parent->size -= size;
-		parent->start += size;
+		}
 	}
+	if (align_splitoff)
+		drm_mm_put_block(align_splitoff);
+
 	return child;
 }
 
@@ -80,9 +181,10 @@
  * Otherwise add to the free stack.
  */
 
-void drm_mm_put_block(drm_mm_t * mm, drm_mm_node_t * cur)
+void drm_mm_put_block(drm_mm_node_t * cur)
 {
 
+	drm_mm_t *mm = cur->mm;
 	drm_mm_node_t *list_root = &mm->root_node;
 	struct list_head *cur_head = &cur->ml_entry;
 	struct list_head *root_head = &list_root->ml_entry;
@@ -105,8 +207,9 @@
 				prev_node->size += next_node->size;
 				list_del(&next_node->ml_entry);
 				list_del(&next_node->fl_entry);
-				drm_free(next_node, sizeof(*next_node),
-					 DRM_MEM_MM);
+				drm_ctl_cache_free(drm_cache.mm,
+						   sizeof(*next_node),
+						   next_node);
 			} else {
 				next_node->size += cur->size;
 				next_node->start = cur->start;
@@ -119,7 +222,7 @@
 		list_add(&cur->fl_entry, &list_root->fl_entry);
 	} else {
 		list_del(&cur->ml_entry);
-		drm_free(cur, sizeof(*cur), DRM_MEM_MM);
+		drm_ctl_cache_free(drm_cache.mm, sizeof(*cur), cur);
 	}
 }
 
@@ -132,16 +235,23 @@
 	drm_mm_node_t *entry;
 	drm_mm_node_t *best;
 	unsigned long best_size;
+	unsigned wasted;
 
 	best = NULL;
 	best_size = ~0UL;
 
-	if (alignment)
-		size += alignment - 1;
-
 	list_for_each(list, free_stack) {
 		entry = list_entry(list, drm_mm_node_t, fl_entry);
-		if (entry->size >= size) {
+		wasted = 0;
+
+		if (alignment) {
+			register unsigned tmp = size % alignment;
+			if (tmp) 
+				wasted += alignment - tmp;
+		}
+
+
+		if (entry->size >= size + wasted) {
 			if (!best_match)
 				return entry;
 			if (size < best_size) {
@@ -154,27 +264,19 @@
 	return best;
 }
 
+int drm_mm_clean(drm_mm_t * mm)
+{
+	struct list_head *head = &mm->root_node.ml_entry;
+
+	return (head->next->next == head);
+}
+
 int drm_mm_init(drm_mm_t * mm, unsigned long start, unsigned long size)
 {
-	drm_mm_node_t *child;
-
 	INIT_LIST_HEAD(&mm->root_node.ml_entry);
 	INIT_LIST_HEAD(&mm->root_node.fl_entry);
-	child = (drm_mm_node_t *) drm_alloc(sizeof(*child), DRM_MEM_MM);
-	if (!child)
-		return -ENOMEM;
 
-	INIT_LIST_HEAD(&child->ml_entry);
-	INIT_LIST_HEAD(&child->fl_entry);
-
-	child->start = start;
-	child->size = size;
-	child->free = 1;
-
-	list_add(&child->fl_entry, &mm->root_node.fl_entry);
-	list_add(&child->ml_entry, &mm->root_node.ml_entry);
-
-	return 0;
+	return drm_mm_create_tail_node(mm, start, size);
 }
 
 EXPORT_SYMBOL(drm_mm_init);
@@ -194,8 +296,7 @@
 
 	list_del(&entry->fl_entry);
 	list_del(&entry->ml_entry);
-
-	drm_free(entry, sizeof(*entry), DRM_MEM_MM);
+	drm_ctl_cache_free(drm_cache.mm, sizeof(*entry), entry);
 }
 
 EXPORT_SYMBOL(drm_mm_takedown);
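Taken together with drm_mm_init() and drm_mm_takedown(), the new tail helpers let the managed range grow and shrink in place. A minimal lifecycle sketch with arbitrary sizes:

	/* Illustrative only: manage a small range and resize its free tail. */
	static void example_mm_lifecycle(void)
	{
		drm_mm_t mm;

		if (drm_mm_init(&mm, 0, 256))			/* manage [0, 256) */
			return;
		drm_mm_add_space_to_tail(&mm, 64);		/* free tail grows */
		drm_mm_remove_space_from_tail(&mm, 32);		/* and shrinks again */
		drm_mm_takedown(&mm);
	}
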
diff --git a/linux-core/drm_object.c b/linux-core/drm_object.c
new file mode 100644
index 0000000..0157329
--- /dev/null
+++ b/linux-core/drm_object.c
@@ -0,0 +1,287 @@
+/**************************************************************************
+ * 
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
+ * All Rights Reserved.
+ * 
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ * 
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ * 
+ * 
+ **************************************************************************/
+
+#include "drmP.h"
+
+int drm_add_user_object(drm_file_t * priv, drm_user_object_t * item,
+			int shareable)
+{
+	drm_device_t *dev = priv->head->dev;
+	int ret;
+
+	atomic_set(&item->refcount, 1);
+	item->shareable = shareable;
+	item->owner = priv;
+
+	ret = drm_ht_just_insert_please(&dev->object_hash, &item->hash,
+					(unsigned long)item, 32, 0, 0);
+	if (ret)
+		return ret;
+
+	list_add_tail(&item->list, &priv->user_objects);
+	return 0;
+}
+
+drm_user_object_t *drm_lookup_user_object(drm_file_t * priv, uint32_t key)
+{
+	drm_device_t *dev = priv->head->dev;
+	drm_hash_item_t *hash;
+	int ret;
+	drm_user_object_t *item;
+
+	ret = drm_ht_find_item(&dev->object_hash, key, &hash);
+	if (ret) {
+		return NULL;
+	}
+	item = drm_hash_entry(hash, drm_user_object_t, hash);
+
+	if (priv != item->owner) {
+		drm_open_hash_t *ht = &priv->refd_object_hash[_DRM_REF_USE];
+		ret = drm_ht_find_item(ht, (unsigned long)item, &hash);
+		if (ret) {
+			DRM_ERROR("Object not registered for usage\n");
+			return NULL;
+		}
+	}
+	return item;
+}
+
+static void drm_deref_user_object(drm_file_t * priv, drm_user_object_t * item)
+{
+	drm_device_t *dev = priv->head->dev;
+	int ret;
+
+	if (atomic_dec_and_test(&item->refcount)) {
+		ret = drm_ht_remove_item(&dev->object_hash, &item->hash);
+		BUG_ON(ret);
+		list_del_init(&item->list);
+		item->remove(priv, item);
+	}
+}
+
+int drm_remove_user_object(drm_file_t * priv, drm_user_object_t * item)
+{
+	if (item->owner != priv) {
+		DRM_ERROR("Cannot destroy object not owned by you.\n");
+		return -EINVAL;
+	}
+	item->owner = 0;
+	item->shareable = 0;
+	list_del_init(&item->list);
+	drm_deref_user_object(priv, item);
+	return 0;
+}
+
+static int drm_object_ref_action(drm_file_t * priv, drm_user_object_t * ro,
+				 drm_ref_t action)
+{
+	int ret = 0;
+
+	switch (action) {
+	case _DRM_REF_USE:
+		atomic_inc(&ro->refcount);
+		break;
+	default:
+		if (!ro->ref_struct_locked) {
+			break;
+		} else {
+			ro->ref_struct_locked(priv, ro, action);
+		}
+	}
+	return ret;
+}
+
+int drm_add_ref_object(drm_file_t * priv, drm_user_object_t * referenced_object,
+		       drm_ref_t ref_action)
+{
+	int ret = 0;
+	drm_ref_object_t *item;
+	drm_open_hash_t *ht = &priv->refd_object_hash[ref_action];
+
+	if (!referenced_object->shareable && priv != referenced_object->owner) {
+		DRM_ERROR("Not allowed to reference this object\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * If this is not a usage reference, check that usage has been registered
+	 * first. Otherwise strange things may happen on destruction.
+	 */
+
+	if ((ref_action != _DRM_REF_USE) && priv != referenced_object->owner) {
+		item =
+		    drm_lookup_ref_object(priv, referenced_object,
+					  _DRM_REF_USE);
+		if (!item) {
+			DRM_ERROR
+			    ("Object not registered for usage by this client\n");
+			return -EINVAL;
+		}
+	}
+
+	if (NULL !=
+	    (item =
+	     drm_lookup_ref_object(priv, referenced_object, ref_action))) {
+		atomic_inc(&item->refcount);
+		return drm_object_ref_action(priv, referenced_object,
+					     ref_action);
+	}
+
+	item = drm_ctl_calloc(1, sizeof(*item), DRM_MEM_OBJECTS);
+	if (item == NULL) {
+		DRM_ERROR("Could not allocate reference object\n");
+		return -ENOMEM;
+	}
+
+	atomic_set(&item->refcount, 1);
+	item->hash.key = (unsigned long)referenced_object;
+	ret = drm_ht_insert_item(ht, &item->hash);
+	item->unref_action = ref_action;
+
+	if (ret)
+		goto out;
+
+	list_add(&item->list, &priv->refd_objects);
+	ret = drm_object_ref_action(priv, referenced_object, ref_action);
+      out:
+	return ret;
+}
+
+drm_ref_object_t *drm_lookup_ref_object(drm_file_t * priv,
+					drm_user_object_t * referenced_object,
+					drm_ref_t ref_action)
+{
+	drm_hash_item_t *hash;
+	int ret;
+
+	ret = drm_ht_find_item(&priv->refd_object_hash[ref_action],
+			       (unsigned long)referenced_object, &hash);
+	if (ret)
+		return NULL;
+
+	return drm_hash_entry(hash, drm_ref_object_t, hash);
+}
+
+static void drm_remove_other_references(drm_file_t * priv,
+					drm_user_object_t * ro)
+{
+	int i;
+	drm_open_hash_t *ht;
+	drm_hash_item_t *hash;
+	drm_ref_object_t *item;
+
+	for (i = _DRM_REF_USE + 1; i < _DRM_NO_REF_TYPES; ++i) {
+		ht = &priv->refd_object_hash[i];
+		while (!drm_ht_find_item(ht, (unsigned long)ro, &hash)) {
+			item = drm_hash_entry(hash, drm_ref_object_t, hash);
+			drm_remove_ref_object(priv, item);
+		}
+	}
+}
+
+void drm_remove_ref_object(drm_file_t * priv, drm_ref_object_t * item)
+{
+	int ret;
+	drm_user_object_t *user_object = (drm_user_object_t *) item->hash.key;
+	drm_open_hash_t *ht = &priv->refd_object_hash[item->unref_action];
+	drm_ref_t unref_action;
+
+	unref_action = item->unref_action;
+	if (atomic_dec_and_test(&item->refcount)) {
+		ret = drm_ht_remove_item(ht, &item->hash);
+		BUG_ON(ret);
+		list_del_init(&item->list);
+		if (unref_action == _DRM_REF_USE)
+			drm_remove_other_references(priv, user_object);
+		drm_ctl_free(item, sizeof(*item), DRM_MEM_OBJECTS);
+	}
+
+	switch (unref_action) {
+	case _DRM_REF_USE:
+		drm_deref_user_object(priv, user_object);
+		break;
+	default:
+		BUG_ON(!user_object->unref);
+		user_object->unref(priv, user_object, unref_action);
+		break;
+	}
+
+}
+
+int drm_user_object_ref(drm_file_t * priv, uint32_t user_token,
+			drm_object_type_t type, drm_user_object_t ** object)
+{
+	drm_device_t *dev = priv->head->dev;
+	drm_user_object_t *uo;
+	int ret;
+
+	mutex_lock(&dev->struct_mutex);
+	uo = drm_lookup_user_object(priv, user_token);
+	if (!uo || (uo->type != type)) {
+		ret = -EINVAL;
+		goto out_err;
+	}
+	ret = drm_add_ref_object(priv, uo, _DRM_REF_USE);
+	if (ret)
+		goto out_err;
+	mutex_unlock(&dev->struct_mutex);
+	*object = uo;
+	DRM_ERROR("Referenced an object\n");
+	return 0;
+      out_err:
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+}
+
+int drm_user_object_unref(drm_file_t * priv, uint32_t user_token,
+			  drm_object_type_t type)
+{
+	drm_device_t *dev = priv->head->dev;
+	drm_user_object_t *uo;
+	drm_ref_object_t *ro;
+	int ret;
+
+	mutex_lock(&dev->struct_mutex);
+	uo = drm_lookup_user_object(priv, user_token);
+	if (!uo || (uo->type != type)) {
+		ret = -EINVAL;
+		goto out_err;
+	}
+	ro = drm_lookup_ref_object(priv, uo, _DRM_REF_USE);
+	if (!ro) {
+		ret = -EINVAL;
+		goto out_err;
+	}
+	drm_remove_ref_object(priv, ro);
+	mutex_unlock(&dev->struct_mutex);
+	DRM_ERROR("Unreferenced an object\n");
+	return 0;
+      out_err:
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+}
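
The pairing of usage references and action references above is easiest to see
from a driver ioctl: resolve the user token to a typed object with
drm_user_object_ref(), use it, then drop the usage reference again with
drm_user_object_unref(). A minimal sketch (the function name and the generic
"type" argument are illustrative only, not part of this patch):

static int example_use_object(drm_file_t * priv, uint32_t token,
			      drm_object_type_t type)
{
	drm_user_object_t *uo;
	int ret;

	/* Takes dev->struct_mutex internally and adds a _DRM_REF_USE reference. */
	ret = drm_user_object_ref(priv, token, type, &uo);
	if (ret)
		return ret;

	/* ... use the object here ... */

	/* Drops the _DRM_REF_USE reference added above. */
	return drm_user_object_unref(priv, token, type);
}
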
diff --git a/linux-core/drm_proc.c b/linux-core/drm_proc.c
index 512a8f7..863cacf 100644
--- a/linux-core/drm_proc.c
+++ b/linux-core/drm_proc.c
@@ -49,6 +49,8 @@
 			   int request, int *eof, void *data);
 static int drm_bufs_info(char *buf, char **start, off_t offset,
 			 int request, int *eof, void *data);
+static int drm_objects_info(char *buf, char **start, off_t offset,
+			 int request, int *eof, void *data);
 #if DRM_DEBUG_CODE
 static int drm_vma_info(char *buf, char **start, off_t offset,
 			int request, int *eof, void *data);
@@ -67,6 +69,7 @@
 	{"clients", drm_clients_info},
 	{"queues", drm_queues_info},
 	{"bufs", drm_bufs_info},
+	{"objects", drm_objects_info},
 #if DRM_DEBUG_CODE
 	{"vma", drm_vma_info},
 #endif
@@ -238,10 +241,11 @@
 			type = "??";
 		else
 			type = types[map->type];
-		DRM_PROC_PRINT("%4d 0x%08lx 0x%08lx %4.4s  0x%02x 0x%08x ",
+		DRM_PROC_PRINT("%4d 0x%08lx 0x%08lx %4.4s  0x%02x 0x%08lx ",
 			       i,
 			       map->offset,
-			       map->size, type, map->flags, r_list->user_token);
+			       map->size, type, map->flags, 
+			       (unsigned long) r_list->user_token);
 
 		if (map->mtrr < 0) {
 			DRM_PROC_PRINT("none\n");
@@ -418,6 +422,89 @@
 }
 
 /**
+ * Called when "/proc/dri/.../objects" is read.
+ *
+ * \param buf output buffer.
+ * \param start start of output data.
+ * \param offset requested start offset.
+ * \param request requested number of bytes.
+ * \param eof whether there is no more data to return.
+ * \param data private data.
+ * \return number of written bytes.
+ */
+static int drm__objects_info(char *buf, char **start, off_t offset, int request,
+			  int *eof, void *data)
+{
+	drm_device_t *dev = (drm_device_t *) data;
+	int len = 0;
+	drm_buffer_manager_t *bm = &dev->bm;
+	drm_fence_manager_t *fm = &dev->fm; 
+	drm_u64_t used_mem;
+	drm_u64_t low_mem;
+	drm_u64_t high_mem;
+
+
+	if (offset > DRM_PROC_LIMIT) {
+		*eof = 1;
+		return 0;
+	}
+
+	*start = &buf[offset];
+	*eof = 0;
+	
+	if (fm->initialized) {
+		DRM_PROC_PRINT("Number of active fence objects: %d.\n\n", 
+			       atomic_read(&fm->count));
+	} else {
+		DRM_PROC_PRINT("Fence objects are not supported by this driver\n\n");
+	}
+
+	if (bm->initialized) {
+		DRM_PROC_PRINT("Number of active buffer objects: %d.\n\n", 
+			       atomic_read(&bm->count));
+		DRM_PROC_PRINT("Number of locked GATT pages: %lu.\n", bm->cur_pages);
+	} else {
+		DRM_PROC_PRINT("Buffer objects are not supported by this driver.\n\n");
+	}
+
+	drm_query_memctl(&used_mem, &low_mem, &high_mem);
+
+	if (used_mem > 16*PAGE_SIZE) { 
+		DRM_PROC_PRINT("Used object memory is %lu pages.\n", 
+			       (unsigned long) (used_mem >> PAGE_SHIFT));
+	} else {
+		DRM_PROC_PRINT("Used object memory is %lu bytes.\n", 
+			       (unsigned long) used_mem);
+	}
+	DRM_PROC_PRINT("Soft object memory usage threshold is %lu pages.\n", 
+		       (unsigned long) (low_mem >> PAGE_SHIFT));
+	DRM_PROC_PRINT("Hard object memory usage threshold is %lu pages.\n", 
+		       (unsigned long) (high_mem >> PAGE_SHIFT));
+
+	DRM_PROC_PRINT("\n");
+
+	if (len > request + offset)
+		return request;
+	*eof = 1;
+	return len - offset;
+}
+
+/**
+ * Simply calls _objects_info() while holding the drm_device::struct_mutex lock.
+ */
+static int drm_objects_info(char *buf, char **start, off_t offset, int request,
+			 int *eof, void *data)
+{
+	drm_device_t *dev = (drm_device_t *) data;
+	int ret;
+
+	mutex_lock(&dev->struct_mutex);
+	ret = drm__objects_info(buf, start, offset, request, eof, data);
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+}
+
+/**
  * Called when "/proc/dri/.../clients" is read.
  *
  * \param buf output buffer.
@@ -500,7 +587,7 @@
 	for (pt = dev->vmalist; pt; pt = pt->next) {
 		if (!(vma = pt->vma))
 			continue;
-		DRM_PROC_PRINT("\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx",
+		DRM_PROC_PRINT("\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
 			       pt->pid,
 			       vma->vm_start,
 			       vma->vm_end,
@@ -510,7 +597,7 @@
 			       vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
 			       vma->vm_flags & VM_LOCKED ? 'l' : '-',
 			       vma->vm_flags & VM_IO ? 'i' : '-',
-			       VM_OFFSET(vma));
+			       vma->vm_pgoff);
 
 #if defined(__i386__)
 		pgprot = pgprot_val(vma->vm_page_prot);
diff --git a/linux-core/drm_scatter.c b/linux-core/drm_scatter.c
index a7144f1..e5c9f87 100644
--- a/linux-core/drm_scatter.c
+++ b/linux-core/drm_scatter.c
@@ -31,7 +31,6 @@
  * DEALINGS IN THE SOFTWARE.
  */
 
-#include <linux/config.h>
 #include <linux/vmalloc.h>
 #include "drmP.h"
 
diff --git a/linux-core/drm_sman.c b/linux-core/drm_sman.c
index 425c823..19a13f3 100644
--- a/linux-core/drm_sman.c
+++ b/linux-core/drm_sman.c
@@ -101,10 +101,9 @@
 
 static void drm_sman_mm_free(void *private, void *ref)
 {
-	drm_mm_t *mm = (drm_mm_t *) private;
 	drm_mm_node_t *node = (drm_mm_node_t *) ref;
 
-	drm_mm_put_block(mm, node);
+	drm_mm_put_block(node);
 }
 
 static void drm_sman_mm_destroy(void *private)
diff --git a/linux-core/drm_stub.c b/linux-core/drm_stub.c
index 839cf44..c03a56a 100644
--- a/linux-core/drm_stub.c
+++ b/linux-core/drm_stub.c
@@ -54,6 +54,11 @@
 struct drm_sysfs_class *drm_class;
 struct proc_dir_entry *drm_proc_root;
 
+drm_cache_t drm_cache =
+{ .mm = NULL,
+  .fence_object = NULL
+};
+
 static int drm_fill_in_dev(drm_device_t * dev, struct pci_dev *pdev,
 		       const struct pci_device_id *ent,
 		       struct drm_driver *driver)
@@ -66,6 +71,7 @@
 	init_timer(&dev->timer);
 	mutex_init(&dev->struct_mutex);
 	mutex_init(&dev->ctxlist_mutex);
+	mutex_init(&dev->bm.init_mutex);
 
 	dev->pdev = pdev;
 	dev->pci_device = pdev->device;
@@ -76,14 +82,28 @@
 #endif
 	dev->irq = pdev->irq;
 
+	if (drm_ht_create(&dev->map_hash, DRM_MAP_HASH_ORDER)) {
+		drm_free(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);
+		return -ENOMEM;
+	}
+	if (drm_mm_init(&dev->offset_manager, DRM_FILE_PAGE_OFFSET_START, 
+			DRM_FILE_PAGE_OFFSET_SIZE)) {
+		drm_free(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);
+		drm_ht_remove(&dev->map_hash);
+		return -ENOMEM;
+	}
+
+	if (drm_ht_create(&dev->object_hash, DRM_OBJECT_HASH_ORDER)) {
+                drm_free(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);
+		drm_ht_remove(&dev->map_hash);
+		drm_mm_takedown(&dev->offset_manager);
+		return -ENOMEM;
+	}
+
 	dev->maplist = drm_calloc(1, sizeof(*dev->maplist), DRM_MEM_MAPS);
 	if (dev->maplist == NULL)
 		return -ENOMEM;
 	INIT_LIST_HEAD(&dev->maplist->head);
-	if (drm_ht_create(&dev->map_hash, 12)) {
-		drm_free(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);
-		return -ENOMEM;
-	}
 
 	/* the DRM has 6 counters */
 	dev->counters = 6;
@@ -125,6 +145,7 @@
 		goto error_out_unreg;
 	}
 
+	drm_fence_manager_init(dev);
 	return 0;
 
 error_out_unreg:
diff --git a/linux-core/drm_sysfs.c b/linux-core/drm_sysfs.c
index df75d7b..e5dd053 100644
--- a/linux-core/drm_sysfs.c
+++ b/linux-core/drm_sysfs.c
@@ -11,7 +11,6 @@
  *
  */
 
-#include <linux/config.h>
 #include <linux/device.h>
 #include <linux/kdev_t.h>
 #include <linux/err.h>
diff --git a/linux-core/drm_ttm.c b/linux-core/drm_ttm.c
new file mode 100644
index 0000000..931972a
--- /dev/null
+++ b/linux-core/drm_ttm.c
@@ -0,0 +1,519 @@
+/**************************************************************************
+ * 
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
+ * All Rights Reserved.
+ * 
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ * 
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ * 
+ * 
+ **************************************************************************/
+
+#include "drmP.h"
+
+static void drm_ttm_ipi_handler(void *null)
+{
+	flush_agp_cache();
+}
+
+static void drm_ttm_cache_flush(void) 
+{
+	if (on_each_cpu(drm_ttm_ipi_handler, NULL, 1, 1) != 0)
+		DRM_ERROR("Timed out waiting for drm cache flush.\n");
+}
+
+
+/*
+ * Use kmalloc if possible. Otherwise fall back to vmalloc.
+ */
+
+static void *ttm_alloc(unsigned long size, int type)
+{
+	void *ret = NULL;
+
+	if (drm_alloc_memctl(size))
+		return NULL;
+	if (size <= PAGE_SIZE) {
+		ret = drm_alloc(size, type);
+	}
+	if (!ret) {
+		ret = vmalloc(size);
+	}
+	if (!ret) {
+		drm_free_memctl(size);
+	}
+	return ret;
+}
+
+static void ttm_free(void *pointer, unsigned long size, int type)
+{
+
+	if ((unsigned long)pointer >= VMALLOC_START &&
+	    (unsigned long)pointer <= VMALLOC_END) {
+		vfree(pointer);
+	} else {
+		drm_free(pointer, size, type);
+	}
+	drm_free_memctl(size);
+}
+
+/*
+ * Unmap all vma pages from vmas mapping this ttm.
+ */
+
+static int unmap_vma_pages(drm_ttm_t * ttm)
+{
+	drm_device_t *dev = ttm->dev;
+	loff_t offset = ((loff_t) ttm->mapping_offset) << PAGE_SHIFT;
+	loff_t holelen = ((loff_t) ttm->num_pages) << PAGE_SHIFT;
+
+#ifdef DRM_ODD_MM_COMPAT
+	int ret;
+	ret = drm_ttm_lock_mm(ttm);
+	if (ret)
+		return ret;
+#endif
+	unmap_mapping_range(dev->dev_mapping, offset, holelen, 1);
+#ifdef DRM_ODD_MM_COMPAT
+	drm_ttm_finish_unmap(ttm);
+#endif
+	return 0;
+}
+
+/*
+ * Change caching policy for the linear kernel map
+ * for a range of pages in a ttm.
+ */
+
+static int drm_set_caching(drm_ttm_t * ttm, int noncached)
+{
+	int i;
+	struct page **cur_page;
+	int do_tlbflush = 0;
+
+	if ((ttm->page_flags & DRM_TTM_PAGE_UNCACHED) == noncached)
+		return 0;
+
+	if (noncached) 
+		drm_ttm_cache_flush();
+
+	for (i = 0; i < ttm->num_pages; ++i) {
+		cur_page = ttm->pages + i;
+		if (*cur_page) {
+			if (!PageHighMem(*cur_page)) {
+				if (noncached) {
+					map_page_into_agp(*cur_page);
+				} else {
+					unmap_page_from_agp(*cur_page);
+				}
+				do_tlbflush = 1;
+			}
+		}
+	}
+	if (do_tlbflush)
+		flush_agp_mappings();
+
+	DRM_MASK_VAL(ttm->page_flags, DRM_TTM_PAGE_UNCACHED, noncached);
+
+	return 0;
+}
+
+/*
+ * Free all resources associated with a ttm.
+ */
+
+int drm_destroy_ttm(drm_ttm_t * ttm)
+{
+
+	int i;
+	struct page **cur_page;
+	drm_ttm_backend_t *be;
+
+	if (!ttm)
+		return 0;
+
+	if (atomic_read(&ttm->vma_count) > 0) {
+		ttm->destroy = 1;
+		DRM_ERROR("VMAs are still alive. Skipping destruction.\n");
+		return -EBUSY;
+	}
+
+	DRM_DEBUG("Destroying a ttm\n");
+
+#ifdef DRM_TTM_ODD_COMPAT
+	BUG_ON(!list_empty(&ttm->vma_list));
+	BUG_ON(!list_empty(&ttm->p_mm_list));
+#endif
+	be = ttm->be;
+	if (be) {
+		be->destroy(be);
+		ttm->be = NULL;
+	}
+
+	if (ttm->pages) {
+		drm_buffer_manager_t *bm = &ttm->dev->bm;
+		if (ttm->page_flags & DRM_TTM_PAGE_UNCACHED)
+			drm_set_caching(ttm, 0);
+
+		for (i = 0; i < ttm->num_pages; ++i) {
+			cur_page = ttm->pages + i;
+			if (*cur_page) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
+				unlock_page(*cur_page);
+#else
+				ClearPageReserved(*cur_page);
+#endif
+				if (page_count(*cur_page) != 1) {
+					DRM_ERROR("Erroneous page count. "
+						  "Leaking pages.\n");
+				}
+				if (page_mapped(*cur_page)) {
+					DRM_ERROR("Erroneous map count. "
+						  "Leaking page mappings.\n");
+				}
+
+				/*
+				 * End debugging.
+				 */
+
+				drm_free_gatt_pages(*cur_page, 0);
+				drm_free_memctl(PAGE_SIZE);
+				--bm->cur_pages;
+			}
+		}
+		ttm_free(ttm->pages, ttm->num_pages * sizeof(*ttm->pages),
+			 DRM_MEM_TTM);
+		ttm->pages = NULL;
+	}
+
+	drm_ctl_free(ttm, sizeof(*ttm), DRM_MEM_TTM);
+	return 0;
+}
+
+static int drm_ttm_populate(drm_ttm_t * ttm)
+{
+	struct page *page;
+	unsigned long i;
+	drm_buffer_manager_t *bm;
+	drm_ttm_backend_t *be;
+
+	if (ttm->state != ttm_unpopulated)
+		return 0;
+
+	bm = &ttm->dev->bm;
+	be = ttm->be;
+	for (i = 0; i < ttm->num_pages; ++i) {
+		page = ttm->pages[i];
+		if (!page) {
+			if (drm_alloc_memctl(PAGE_SIZE)) {
+				return -ENOMEM;
+			}
+			page = drm_alloc_gatt_pages(0);
+			if (!page) {
+				drm_free_memctl(PAGE_SIZE);
+				return -ENOMEM;
+			}
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
+			SetPageLocked(page);
+#else
+			SetPageReserved(page);
+#endif
+			ttm->pages[i] = page;
+			++bm->cur_pages;
+		}
+	}
+	be->populate(be, ttm->num_pages, ttm->pages);
+	ttm->state = ttm_unbound;
+	return 0;
+}
+
+/*
+ * Initialize a ttm.
+ */
+
+static drm_ttm_t *drm_init_ttm(struct drm_device *dev, unsigned long size)
+{
+	drm_bo_driver_t *bo_driver = dev->driver->bo_driver;
+	drm_ttm_t *ttm;
+
+	if (!bo_driver)
+		return NULL;
+
+	ttm = drm_ctl_calloc(1, sizeof(*ttm), DRM_MEM_TTM);
+	if (!ttm)
+		return NULL;
+
+#ifdef DRM_ODD_MM_COMPAT
+	INIT_LIST_HEAD(&ttm->p_mm_list);
+	INIT_LIST_HEAD(&ttm->vma_list);
+#endif
+
+	ttm->dev = dev;
+	atomic_set(&ttm->vma_count, 0);
+
+	ttm->destroy = 0;
+	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+	ttm->page_flags = 0;
+
+	/*
+	 * Account also for AGP module memory usage.
+	 */
+
+	ttm->pages = ttm_alloc(ttm->num_pages * sizeof(*ttm->pages),
+			       DRM_MEM_TTM);
+	if (!ttm->pages) {
+		drm_destroy_ttm(ttm);
+		DRM_ERROR("Failed allocating page table\n");
+		return NULL;
+	}
+	memset(ttm->pages, 0, ttm->num_pages * sizeof(*ttm->pages));
+	ttm->be = bo_driver->create_ttm_backend_entry(dev);
+	if (!ttm->be) {
+		drm_destroy_ttm(ttm);
+		DRM_ERROR("Failed creating ttm backend entry\n");
+		return NULL;
+	}
+	ttm->state = ttm_unpopulated;
+	return ttm;
+}
+
+/*
+ * Unbind a ttm region from the aperture.
+ */
+
+int drm_evict_ttm(drm_ttm_t * ttm)
+{
+	drm_ttm_backend_t *be = ttm->be;
+	int ret;
+
+	switch (ttm->state) {
+	case ttm_bound:
+		if (be->needs_ub_cache_adjust(be)) {
+			ret = unmap_vma_pages(ttm);
+			if (ret) {
+				return ret;
+			}
+		}
+		be->unbind(be);
+		break;
+	default:
+		break;
+	}
+	ttm->state = ttm_evicted;
+	return 0;
+}
+
+void drm_fixup_ttm_caching(drm_ttm_t * ttm)
+{
+
+	if (ttm->state == ttm_evicted) {
+		drm_ttm_backend_t *be = ttm->be;
+		if (be->needs_ub_cache_adjust(be)) {
+			drm_set_caching(ttm, 0);
+		}
+		ttm->state = ttm_unbound;
+	}
+}
+
+int drm_unbind_ttm(drm_ttm_t * ttm)
+{
+	int ret = 0;
+
+	if (ttm->state == ttm_bound)
+		ret = drm_evict_ttm(ttm);
+
+	if (ret)
+		return ret;
+
+	drm_fixup_ttm_caching(ttm);
+	return 0;
+}
+
+int drm_bind_ttm(drm_ttm_t * ttm, int cached, unsigned long aper_offset)
+{
+
+	int ret = 0;
+	drm_ttm_backend_t *be;
+
+	if (!ttm)
+		return -EINVAL;
+	if (ttm->state == ttm_bound)
+		return 0;
+
+	be = ttm->be;
+
+	ret = drm_ttm_populate(ttm);
+	if (ret)
+		return ret;
+	if (ttm->state == ttm_unbound && !cached) {
+		ret = unmap_vma_pages(ttm);
+		if (ret)
+			return ret;
+
+		drm_set_caching(ttm, DRM_TTM_PAGE_UNCACHED);
+	}
+#ifdef DRM_ODD_MM_COMPAT
+	else if (ttm->state == ttm_evicted && !cached) {
+		ret = drm_ttm_lock_mm(ttm);
+		if (ret)
+			return ret;
+	}
+#endif
+	if ((ret = be->bind(be, aper_offset, cached))) {
+		ttm->state = ttm_evicted;
+#ifdef DRM_ODD_MM_COMPAT
+		if (be->needs_ub_cache_adjust(be))
+			drm_ttm_unlock_mm(ttm);
+#endif
+		DRM_ERROR("Couldn't bind backend.\n");
+		return ret;
+	}
+
+	ttm->aper_offset = aper_offset;
+	ttm->state = ttm_bound;
+
+#ifdef DRM_ODD_MM_COMPAT
+	if (be->needs_ub_cache_adjust(be)) {
+		ret = drm_ttm_remap_bound(ttm);
+		if (ret)
+			return ret;
+	}
+#endif
+
+	return 0;
+}
+
+/*
+ * dev->struct_mutex locked.
+ */
+static void drm_ttm_object_remove(drm_device_t * dev, drm_ttm_object_t * object)
+{
+	drm_map_list_t *list = &object->map_list;
+	drm_local_map_t *map;
+
+	if (list->user_token)
+		drm_ht_remove_item(&dev->map_hash, &list->hash);
+
+	if (list->file_offset_node) {
+		drm_mm_put_block(list->file_offset_node);
+		list->file_offset_node = NULL;
+	}
+
+	map = list->map;
+
+	if (map) {
+		drm_ttm_t *ttm = (drm_ttm_t *) map->offset;
+		if (ttm) {
+			if (drm_destroy_ttm(ttm) != -EBUSY) {
+				drm_ctl_free(map, sizeof(*map), DRM_MEM_TTM);
+			}
+		} else {
+			drm_ctl_free(map, sizeof(*map), DRM_MEM_TTM);
+		}
+	}
+
+	drm_ctl_free(object, sizeof(*object), DRM_MEM_TTM);
+}
+
+void drm_ttm_object_deref_locked(drm_device_t * dev, drm_ttm_object_t * to)
+{
+	if (atomic_dec_and_test(&to->usage)) {
+		drm_ttm_object_remove(dev, to);
+	}
+}
+
+void drm_ttm_object_deref_unlocked(drm_device_t * dev, drm_ttm_object_t * to)
+{
+	if (atomic_dec_and_test(&to->usage)) {
+		mutex_lock(&dev->struct_mutex);
+		if (atomic_read(&to->usage) == 0)
+			drm_ttm_object_remove(dev, to);
+		mutex_unlock(&dev->struct_mutex);
+	}
+}
+
+/*
+ * Create a ttm and add it to the drm book-keeping. 
+ * dev->struct_mutex locked.
+ */
+
+int drm_ttm_object_create(drm_device_t * dev, unsigned long size,
+			  uint32_t flags, drm_ttm_object_t ** ttm_object)
+{
+	drm_ttm_object_t *object;
+	drm_map_list_t *list;
+	drm_local_map_t *map;
+	drm_ttm_t *ttm;
+
+	object = drm_ctl_calloc(1, sizeof(*object), DRM_MEM_TTM);
+	if (!object)
+		return -ENOMEM;
+	object->flags = flags;
+	list = &object->map_list;
+
+	list->map = drm_ctl_calloc(1, sizeof(*map), DRM_MEM_TTM);
+	if (!list->map) {
+		drm_ttm_object_remove(dev, object);
+		return -ENOMEM;
+	}
+	map = list->map;
+
+	ttm = drm_init_ttm(dev, size);
+	if (!ttm) {
+		DRM_ERROR("Could not create ttm\n");
+		drm_ttm_object_remove(dev, object);
+		return -ENOMEM;
+	}
+
+	map->offset = (unsigned long)ttm;
+	map->type = _DRM_TTM;
+	map->flags = _DRM_REMOVABLE;
+	map->size = ttm->num_pages * PAGE_SIZE;
+	map->handle = (void *)object;
+
+	/*
+	 * Add a one-page "hole" to the block size to avoid the mm subsystem
+	 * merging vmas.
+	 * FIXME: Is this really needed?
+	 */
+
+	list->file_offset_node = drm_mm_search_free(&dev->offset_manager,
+						    ttm->num_pages + 1, 0, 0);
+	if (!list->file_offset_node) {
+		drm_ttm_object_remove(dev, object);
+		return -ENOMEM;
+	}
+	list->file_offset_node = drm_mm_get_block(list->file_offset_node,
+						  ttm->num_pages + 1, 0);
+
+	list->hash.key = list->file_offset_node->start;
+
+	if (drm_ht_insert_item(&dev->map_hash, &list->hash)) {
+		drm_ttm_object_remove(dev, object);
+		return -ENOMEM;
+	}
+
+	list->user_token = ((drm_u64_t) list->hash.key) << PAGE_SHIFT;
+	ttm->mapping_offset = list->hash.key;
+	atomic_set(&object->usage, 1);
+	*ttm_object = object;
+	return 0;
+}
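
Putting the pieces of this file together, the life cycle of a ttm object is:
create it, bind its ttm into the aperture, unbind it, and finally drop the
reference. A minimal sketch, assuming the caller already holds
dev->struct_mutex as the comments above require; the size, caching and
aperture offset values are illustrative:

static int example_ttm_cycle(drm_device_t * dev, unsigned long size,
			     unsigned long aper_offset)
{
	drm_ttm_object_t *to;
	drm_ttm_t *ttm;
	int ret;

	ret = drm_ttm_object_create(dev, size, 0, &to);
	if (ret)
		return ret;

	ttm = drm_ttm_from_object(to);

	/* Allocates the backing pages and binds them at aper_offset (uncached). */
	ret = drm_bind_ttm(ttm, 0, aper_offset);
	if (ret)
		goto out;

	/* ... the GPU uses the pages here ... */

	ret = drm_unbind_ttm(ttm);	/* evict and fix up the caching policy */
out:
	drm_ttm_object_deref_locked(dev, to);	/* destroys the ttm when unused */
	return ret;
}
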
diff --git a/linux-core/drm_ttm.h b/linux-core/drm_ttm.h
new file mode 100644
index 0000000..11a1375
--- /dev/null
+++ b/linux-core/drm_ttm.h
@@ -0,0 +1,145 @@
+/**************************************************************************
+ * 
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
+ * All Rights Reserved.
+ * 
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ * 
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ * 
+ * 
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#ifndef _DRM_TTM_H
+#define _DRM_TTM_H
+#define DRM_HAS_TTM
+
+/*
+ * The backend GART interface (in our case AGP). Any similar type of device
+ * (PCIe?) needs only to implement these functions to be usable with the TTM
+ * interface. The AGP backend implementation lives in drm_agpsupport.c and
+ * basically maps these calls to the available functions in agpgart. Each drm
+ * device driver gets an additional function pointer that creates these types,
+ * so that the device can choose the correct aperture (multiple AGP apertures,
+ * etc.). Most device drivers will let this point to the standard AGP
+ * implementation.
+ */
+
+#define DRM_BE_FLAG_NEEDS_FREE     0x00000001
+#define DRM_BE_FLAG_BOUND_CACHED   0x00000002
+#define DRM_BE_FLAG_CBA            0x00000004
+
+typedef struct drm_ttm_backend {
+	unsigned long aperture_base;
+	void *private;
+	uint32_t flags;
+	uint32_t drm_map_type;
+	int (*needs_ub_cache_adjust) (struct drm_ttm_backend * backend);
+	int (*populate) (struct drm_ttm_backend * backend,
+			 unsigned long num_pages, struct page ** pages);
+	void (*clear) (struct drm_ttm_backend * backend);
+	int (*bind) (struct drm_ttm_backend * backend,
+		     unsigned long offset, int cached);
+	int (*unbind) (struct drm_ttm_backend * backend);
+	void (*destroy) (struct drm_ttm_backend * backend);
+} drm_ttm_backend_t;
+
+typedef struct drm_ttm {
+	struct page **pages;
+	uint32_t page_flags;
+	unsigned long num_pages;
+	unsigned long aper_offset;
+	atomic_t vma_count;
+	struct drm_device *dev;
+	int destroy;
+	uint32_t mapping_offset;
+	drm_ttm_backend_t *be;
+	enum {
+		ttm_bound,
+		ttm_evicted,
+		ttm_unbound,
+		ttm_unpopulated,
+	} state;
+#ifdef DRM_ODD_MM_COMPAT
+	struct list_head vma_list;
+	struct list_head p_mm_list;
+#endif
+
+} drm_ttm_t;
+
+typedef struct drm_ttm_object {
+	atomic_t usage;
+	uint32_t flags;
+	drm_map_list_t map_list;
+} drm_ttm_object_t;
+
+extern int drm_ttm_object_create(struct drm_device *dev, unsigned long size,
+				 uint32_t flags,
+				 drm_ttm_object_t ** ttm_object);
+extern void drm_ttm_object_deref_locked(struct drm_device *dev,
+					drm_ttm_object_t * to);
+extern void drm_ttm_object_deref_unlocked(struct drm_device *dev,
+					  drm_ttm_object_t * to);
+extern drm_ttm_object_t *drm_lookup_ttm_object(drm_file_t * priv,
+					       uint32_t handle,
+					       int check_owner);
+extern int drm_bind_ttm(drm_ttm_t * ttm, int cached, unsigned long aper_offset);
+
+extern int drm_unbind_ttm(drm_ttm_t * ttm);
+
+/*
+ * Evict a ttm region. Keeps the aperture caching policy.
+ */
+
+extern int drm_evict_ttm(drm_ttm_t * ttm);
+extern void drm_fixup_ttm_caching(drm_ttm_t * ttm);
+
+/*
+ * Destroy a ttm. The user normally calls drmRmMap or a similar IOCTL to do this, 
+ * which calls this function iff there are no vmas referencing it anymore. Otherwise it is called
+ * when the last vma exits.
+ */
+
+extern int drm_destroy_ttm(drm_ttm_t * ttm);
+extern int drm_ttm_ioctl(DRM_IOCTL_ARGS);
+
+static __inline__ drm_ttm_t *drm_ttm_from_object(drm_ttm_object_t * to)
+{
+	return (drm_ttm_t *) to->map_list.map->offset;
+}
+
+#define DRM_MASK_VAL(dest, mask, val)			\
+  (dest) = ((dest) & ~(mask)) | ((val) & (mask));
+
+#define DRM_TTM_MASK_FLAGS ((1 << PAGE_SHIFT) - 1)
+#define DRM_TTM_MASK_PFN (0xFFFFFFFFU - DRM_TTM_MASK_FLAGS)
+
+/*
+ * Page flags.
+ */
+
+#define DRM_TTM_PAGE_UNCACHED 0x01
+#define DRM_TTM_PAGE_USED     0x02
+#define DRM_TTM_PAGE_BOUND    0x04
+#define DRM_TTM_PAGE_PRESENT  0x08
+
+#endif
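
The backend interface described at the top of this header amounts to filling
in a drm_ttm_backend_t and returning it from the driver's
create_ttm_backend_entry hook. A skeleton, purely illustrative (the example_*
names are not part of this patch, and only a few of the hooks are shown):

static int example_populate(drm_ttm_backend_t * backend,
			    unsigned long num_pages, struct page **pages)
{
	/* Remember the page array until bind() is called. */
	return 0;
}

static int example_bind(drm_ttm_backend_t * backend,
			unsigned long offset, int cached)
{
	/* Enter the remembered pages into the GART at the given offset. */
	return 0;
}

static int example_unbind(drm_ttm_backend_t * backend)
{
	/* Take the pages out of the GART again. */
	return 0;
}

AGP-based drivers do not need to write this themselves; as i915_buffer.c below
shows, the hook can simply return drm_agp_init_ttm().
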
diff --git a/linux-core/drm_vm.c b/linux-core/drm_vm.c
index adff7d1..6eb996a 100644
--- a/linux-core/drm_vm.c
+++ b/linux-core/drm_vm.c
@@ -34,12 +34,42 @@
  */
 
 #include "drmP.h"
+
 #if defined(__ia64__)
 #include <linux/efi.h>
 #endif
 
 static void drm_vm_open(struct vm_area_struct *vma);
 static void drm_vm_close(struct vm_area_struct *vma);
+static void drm_vm_ttm_close(struct vm_area_struct *vma);
+static int drm_vm_ttm_open(struct vm_area_struct *vma);
+static void drm_vm_ttm_open_wrapper(struct vm_area_struct *vma);
+
+
+pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
+{
+	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
+
+#if defined(__i386__) || defined(__x86_64__)
+	if (boot_cpu_data.x86 > 3 && map_type != _DRM_AGP) {
+		pgprot_val(tmp) |= _PAGE_PCD;
+		pgprot_val(tmp) &= ~_PAGE_PWT;
+	}
+#elif defined(__powerpc__)
+	pgprot_val(tmp) |= _PAGE_NO_CACHE;
+	if (map_type == _DRM_REGISTERS)
+		pgprot_val(tmp) |= _PAGE_GUARDED;
+#endif
+#if defined(__ia64__)
+	if (efi_range_is_wc(vma->vm_start, vma->vm_end -
+				    vma->vm_start))
+		tmp = pgprot_writecombine(tmp);
+	else
+		tmp = pgprot_noncached(tmp);
+#endif
+	return tmp;
+}
+
 
 /**
  * \c nopage method for AGP virtual memory.
@@ -70,7 +100,7 @@
 	if (!dev->agp || !dev->agp->cant_use_aperture)
 		goto vm_nopage_error;
 
-	if (drm_ht_find_item(&dev->map_hash, VM_OFFSET(vma), &hash))
+	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
 		goto vm_nopage_error;
 
 	r_list = drm_hash_entry(hash, drm_map_list_t, hash);
@@ -129,6 +159,95 @@
 }
 #endif				/* __OS_HAS_AGP */
 
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) || \
+     LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20))
+static
+#endif
+struct page *drm_vm_ttm_fault(struct vm_area_struct *vma, 
+			      struct fault_data *data)
+{
+	unsigned long address = data->address;
+	drm_local_map_t *map = (drm_local_map_t *) vma->vm_private_data;
+	unsigned long page_offset;
+	struct page *page;
+	drm_ttm_t *ttm; 
+	drm_buffer_manager_t *bm;
+	drm_device_t *dev;
+	unsigned long pfn;
+	int err;
+	pgprot_t pgprot;
+
+	if (!map) {
+		data->type = VM_FAULT_OOM;
+		return NULL;
+	}
+
+	if (address > vma->vm_end) {
+		data->type = VM_FAULT_SIGBUS;
+		return NULL;
+	}
+
+	ttm = (drm_ttm_t *) map->offset;
+	
+	dev = ttm->dev;
+
+	/*
+	 * Perhaps retry here?
+	 */
+
+	mutex_lock(&dev->struct_mutex);
+	drm_fixup_ttm_caching(ttm);
+
+	bm = &dev->bm;
+	page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
+	page = ttm->pages[page_offset];
+
+	if (!page) {
+		if (drm_alloc_memctl(PAGE_SIZE)) {
+			data->type = VM_FAULT_OOM;
+			goto out;
+		}
+		page = ttm->pages[page_offset] = drm_alloc_gatt_pages(0);
+		if (!page) {
+			drm_free_memctl(PAGE_SIZE);
+			data->type = VM_FAULT_OOM;
+			goto out;
+		}
+		++bm->cur_pages;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
+		SetPageLocked(page);
+#else
+		SetPageReserved(page);
+#endif
+	}
+
+	if (ttm->page_flags & DRM_TTM_PAGE_UNCACHED) {
+
+		/*
+		 * FIXME: Check can't map aperture flag.
+		 */
+
+		pfn = ttm->aper_offset + page_offset + 
+			(ttm->be->aperture_base >> PAGE_SHIFT);
+		pgprot = drm_io_prot(ttm->be->drm_map_type, vma);
+	} else {
+		pfn = page_to_pfn(page);
+		pgprot = vma->vm_page_prot;
+	}
+	
+	err = vm_insert_pfn(vma, address, pfn, pgprot);
+
+	if (!err || err == -EBUSY) 
+		data->type = VM_FAULT_MINOR; 
+	else
+		data->type = VM_FAULT_OOM;
+ out:
+	mutex_unlock(&dev->struct_mutex);
+	return NULL;
+}
+#endif
+
 /**
  * \c nopage method for shared virtual memory.
  *
@@ -198,7 +317,7 @@
 			} else {
 				dev->vmalist = pt->next;
 			}
-			drm_free(pt, sizeof(*pt), DRM_MEM_VMAS);
+			drm_ctl_free(pt, sizeof(*pt), DRM_MEM_VMAS);
 		} else {
 			prev = pt;
 		}
@@ -243,6 +362,9 @@
 				dmah.size = map->size;
 				__drm_pci_free(dev, &dmah);
 				break;
+		        case _DRM_TTM:
+				BUG_ON(1);
+				break;
 			}
 			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
 		}
@@ -358,6 +480,7 @@
 	return drm_do_vm_sg_nopage(vma, address);
 }
 
+
 #else				/* LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,0) */
 
 static struct page *drm_vm_nopage(struct vm_area_struct *vma,
@@ -414,6 +537,20 @@
 	.close = drm_vm_close,
 };
 
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
+static struct vm_operations_struct drm_vm_ttm_ops = {
+	.nopage = drm_vm_ttm_nopage,
+	.open = drm_vm_ttm_open_wrapper,
+	.close = drm_vm_ttm_close,
+};
+#else
+static struct vm_operations_struct drm_vm_ttm_ops = {
+	.fault = drm_vm_ttm_fault,
+	.open = drm_vm_ttm_open_wrapper,
+	.close = drm_vm_ttm_close,
+};
+#endif
+
 /**
  * \c open method for shared virtual memory.
  *
@@ -432,7 +569,7 @@
 		  vma->vm_start, vma->vm_end - vma->vm_start);
 	atomic_inc(&dev->vma_count);
 
-	vma_entry = drm_alloc(sizeof(*vma_entry), DRM_MEM_VMAS);
+	vma_entry = drm_ctl_alloc(sizeof(*vma_entry), DRM_MEM_VMAS);
 	if (vma_entry) {
 		mutex_lock(&dev->struct_mutex);
 		vma_entry->vma = vma;
@@ -443,6 +580,29 @@
 	}
 }
 
+static int drm_vm_ttm_open(struct vm_area_struct *vma) {
+  
+	drm_local_map_t *map = (drm_local_map_t *)vma->vm_private_data;
+	drm_ttm_t *ttm;
+	drm_file_t *priv = vma->vm_file->private_data;
+	drm_device_t *dev = priv->head->dev;
+
+	drm_vm_open(vma);
+	mutex_lock(&dev->struct_mutex);
+	ttm = (drm_ttm_t *) map->offset;
+	atomic_inc(&ttm->vma_count);
+#ifdef DRM_ODD_MM_COMPAT
+	drm_ttm_add_vma(ttm, vma);
+#endif
+	mutex_unlock(&dev->struct_mutex);
+	return 0;
+}
+
+static void drm_vm_ttm_open_wrapper(struct vm_area_struct *vma) 
+{
+	drm_vm_ttm_open(vma);
+}
+
 /**
  * \c close method for all virtual memory types.
  *
@@ -469,13 +629,42 @@
 			} else {
 				dev->vmalist = pt->next;
 			}
-			drm_free(pt, sizeof(*pt), DRM_MEM_VMAS);
+			drm_ctl_free(pt, sizeof(*pt), DRM_MEM_VMAS);
 			break;
 		}
 	}
 	mutex_unlock(&dev->struct_mutex);
 }
 
+
+static void drm_vm_ttm_close(struct vm_area_struct *vma)
+{
+	drm_local_map_t *map = (drm_local_map_t *) vma->vm_private_data; 
+	drm_ttm_t *ttm; 
+        drm_device_t *dev;
+	int ret;
+
+	drm_vm_close(vma); 
+	if (map) {
+		ttm = (drm_ttm_t *) map->offset;
+		dev = ttm->dev;
+		mutex_lock(&dev->struct_mutex);
+#ifdef DRM_ODD_MM_COMPAT
+		drm_ttm_delete_vma(ttm, vma);
+#endif
+		if (atomic_dec_and_test(&ttm->vma_count)) {
+			if (ttm->destroy) {
+				ret = drm_destroy_ttm(ttm);
+				BUG_ON(ret);
+				drm_ctl_free(map, sizeof(*map), DRM_MEM_TTM);
+			}
+		}
+		mutex_unlock(&dev->struct_mutex);
+	}
+	return;
+}
+
+
 /**
  * mmap DMA memory.
  *
@@ -496,8 +685,8 @@
 	lock_kernel();
 	dev = priv->head->dev;
 	dma = dev->dma;
-	DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
-		  vma->vm_start, vma->vm_end, VM_OFFSET(vma));
+	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
+		  vma->vm_start, vma->vm_end, vma->vm_pgoff);
 
 	/* Length must match exact page count */
 	if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
@@ -572,8 +761,8 @@
 	unsigned long offset = 0;
 	drm_hash_item_t *hash;
 
-	DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
-		  vma->vm_start, vma->vm_end, VM_OFFSET(vma));
+	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
+		  vma->vm_start, vma->vm_end, vma->vm_pgoff);
 
 	if (!priv->authenticated)
 		return -EACCES;
@@ -582,7 +771,7 @@
 	 * the AGP mapped at physical address 0
 	 * --BenH.
 	 */
-	if (!VM_OFFSET(vma)
+	if (!vma->vm_pgoff
 #if __OS_HAS_AGP
 	    && (!dev->agp
 		|| dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
@@ -590,7 +779,7 @@
 	    )
 		return drm_mmap_dma(filp, vma);
 
-	if (drm_ht_find_item(&dev->map_hash, VM_OFFSET(vma), &hash)) {
+	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff , &hash)) {
 		DRM_ERROR("Could not find map\n");
 		return -EINVAL;
 	}
@@ -636,27 +825,9 @@
 		/* fall through to _DRM_FRAME_BUFFER... */
 	case _DRM_FRAME_BUFFER:
 	case _DRM_REGISTERS:
-#if defined(__i386__) || defined(__x86_64__)
-		if (boot_cpu_data.x86 > 3 && map->type != _DRM_AGP) {
-			pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
-			pgprot_val(vma->vm_page_prot) &= ~_PAGE_PWT;
-		}
-#elif defined(__powerpc__)
-		pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
-		if (map->type == _DRM_REGISTERS)
-			pgprot_val(vma->vm_page_prot) |= _PAGE_GUARDED;
-#endif
-		vma->vm_flags |= VM_IO;	/* not in core dump */
-#if defined(__ia64__)
-		if (efi_range_is_wc(vma->vm_start, vma->vm_end -
-				    vma->vm_start))
-			vma->vm_page_prot =
-				pgprot_writecombine(vma->vm_page_prot);
-		else
-			vma->vm_page_prot =
-				pgprot_noncached(vma->vm_page_prot);
-#endif
 		offset = dev->driver->get_reg_ofs(dev);
+		vma->vm_flags |= VM_IO;	/* not in core dump */
+		vma->vm_page_prot = drm_io_prot(map->type, vma);
 #ifdef __sparc__
 		if (io_remap_pfn_range(vma, vma->vm_start,
 					(map->offset + offset) >>PAGE_SHIFT,
@@ -703,6 +874,20 @@
 		vma->vm_flags |= VM_RESERVED;
 #endif
 		break;
+	case _DRM_TTM: {
+		vma->vm_ops = &drm_vm_ttm_ops;
+		vma->vm_private_data = (void *) map;
+		vma->vm_file = filp;
+		vma->vm_flags |= VM_RESERVED | VM_IO;
+#ifdef DRM_ODD_MM_COMPAT
+		mutex_lock(&dev->struct_mutex);
+		drm_ttm_map_bound(vma);
+		mutex_unlock(&dev->struct_mutex);
+#endif		
+		if (drm_vm_ttm_open(vma))
+		        return -EAGAIN;
+		return 0;
+	}
 	default:
 		return -EINVAL;	/* This should never happen. */
 	}
diff --git a/linux-core/ffb_drv.c b/linux-core/ffb_drv.c
index 7b028c8..9c88f06 100644
--- a/linux-core/ffb_drv.c
+++ b/linux-core/ffb_drv.c
@@ -4,7 +4,6 @@
  * Copyright (C) 2000 David S. Miller (davem@redhat.com)
  */
 
-#include <linux/config.h>
 #include <linux/sched.h>
 #include <linux/smp_lock.h>
 #include <asm/shmparam.h>
diff --git a/linux-core/i810_drv.c b/linux-core/i810_drv.c
index d4b7376..fc784a0 100644
--- a/linux-core/i810_drv.c
+++ b/linux-core/i810_drv.c
@@ -30,7 +30,6 @@
  *    Gareth Hughes <gareth@valinux.com>
  */
 
-#include <linux/config.h>
 #include "drmP.h"
 #include "drm.h"
 #include "i810_drm.h"
diff --git a/linux-core/i830_drv.c b/linux-core/i830_drv.c
index 74b574a..6416161 100644
--- a/linux-core/i830_drv.c
+++ b/linux-core/i830_drv.c
@@ -32,8 +32,6 @@
  *    Keith Whitwell <keith@tungstengraphics.com>
  */
 
-#include <linux/config.h>
-
 #include "drmP.h"
 #include "drm.h"
 #include "i830_drm.h"
diff --git a/linux-core/i915_buffer.c b/linux-core/i915_buffer.c
new file mode 100644
index 0000000..c3e5446
--- /dev/null
+++ b/linux-core/i915_buffer.c
@@ -0,0 +1,66 @@
+/**************************************************************************
+ * 
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
+ * All Rights Reserved.
+ * 
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ * 
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ * 
+ * 
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#include "drmP.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+
+
+drm_ttm_backend_t *i915_create_ttm_backend_entry(drm_device_t * dev)
+{
+	return drm_agp_init_ttm(dev, NULL);
+}
+
+int i915_fence_types(uint32_t buffer_flags, uint32_t * class, uint32_t * type)
+{
+	*class = 0;
+	if (buffer_flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE))
+		*type = 3;
+	else
+		*type = 1;
+	return 0;
+}
+
+int i915_invalidate_caches(drm_device_t * dev, uint32_t flags)
+{
+	/*
+	 * FIXME: Only emit once per batchbuffer submission.
+	 */
+
+	uint32_t flush_cmd = MI_NO_WRITE_FLUSH;
+
+	if (flags & DRM_BO_FLAG_READ)
+		flush_cmd |= MI_READ_FLUSH;
+	if (flags & DRM_BO_FLAG_EXE)
+		flush_cmd |= MI_EXE_FLUSH;
+
+	return i915_emit_mi_flush(dev, flush_cmd);
+}
diff --git a/linux-core/i915_drv.c b/linux-core/i915_drv.c
index 209500b..2c5b43d 100644
--- a/linux-core/i915_drv.c
+++ b/linux-core/i915_drv.c
@@ -38,6 +38,27 @@
 	i915_PCI_IDS
 };
 
+#ifdef I915_HAVE_FENCE
+static drm_fence_driver_t i915_fence_driver = {
+	.no_types = 2,
+	.wrap_diff = (1 << 30),
+	.flush_diff = (1 << 29),
+	.sequence_mask = 0xffffffffU,
+	.lazy_capable = 1,
+	.emit = i915_fence_emit_sequence,
+	.poke_flush = i915_poke_flush,
+};
+#endif
+#ifdef I915_HAVE_BUFFER
+static drm_bo_driver_t i915_bo_driver = {
+	.iomap = {NULL, NULL},
+	.cached = {1, 1},
+	.create_ttm_backend_entry = i915_create_ttm_backend_entry,
+	.fence_type = i915_fence_types,
+	.invalidate_caches = i915_invalidate_caches
+};
+#endif
+
 static int probe(struct pci_dev *pdev, const struct pci_device_id *ent);
 static struct drm_driver driver = {
 	/* don't use mtrr's here, the Xserver or user space app should
@@ -79,7 +100,12 @@
 		.probe = probe,
 		.remove = __devexit_p(drm_cleanup_pci),
 		},
-
+#ifdef I915_HAVE_FENCE
+	.fence_driver = &i915_fence_driver,
+#endif
+#ifdef I915_HAVE_BUFFER
+	.bo_driver = &i915_bo_driver,
+#endif
 	.name = DRIVER_NAME,
 	.desc = DRIVER_DESC,
 	.date = DRIVER_DATE,
diff --git a/linux-core/i915_fence.c b/linux-core/i915_fence.c
new file mode 100644
index 0000000..2182604
--- /dev/null
+++ b/linux-core/i915_fence.c
@@ -0,0 +1,146 @@
+/**************************************************************************
+ * 
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
+ * All Rights Reserved.
+ * 
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ * 
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ * 
+ * 
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+
+/*
+ * Implements an intel sync flush operation.
+ */
+
+static void i915_perform_flush(drm_device_t * dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	drm_fence_manager_t *fm = &dev->fm;
+	drm_fence_driver_t *driver = dev->driver->fence_driver;
+	uint32_t flush_flags = 0;
+	uint32_t flush_sequence = 0;
+	uint32_t i_status;
+	uint32_t diff;
+	uint32_t sequence;
+
+	if (!dev_priv)
+		return;
+
+	if (fm->pending_exe_flush) {
+		sequence = READ_BREADCRUMB(dev_priv);
+
+		/*
+		 * First update fences with the current breadcrumb.
+		 */
+
+		diff = sequence - fm->last_exe_flush;
+		if (diff < driver->wrap_diff && diff != 0) {
+			drm_fence_handler(dev, sequence, DRM_FENCE_TYPE_EXE);
+		}
+
+		diff = sequence - fm->exe_flush_sequence;
+		if (diff < driver->wrap_diff) {
+			fm->pending_exe_flush = 0;
+			if (dev_priv->fence_irq_on) {
+				i915_user_irq_off(dev_priv);
+				dev_priv->fence_irq_on = 0;
+			}
+		} else if (!dev_priv->fence_irq_on) {
+			i915_user_irq_on(dev_priv);
+			dev_priv->fence_irq_on = 1;
+		}
+	}
+
+	if (dev_priv->flush_pending) {
+		i_status = READ_HWSP(dev_priv, 0);
+		if ((i_status & (1 << 12)) !=
+		    (dev_priv->saved_flush_status & (1 << 12))) {
+			flush_flags = dev_priv->flush_flags;
+			flush_sequence = dev_priv->flush_sequence;
+			dev_priv->flush_pending = 0;
+			drm_fence_handler(dev, flush_sequence, flush_flags);
+		}
+	}
+
+	if (fm->pending_flush && !dev_priv->flush_pending) {
+		dev_priv->flush_sequence = (uint32_t) READ_BREADCRUMB(dev_priv);
+		dev_priv->flush_flags = fm->pending_flush;
+		dev_priv->saved_flush_status = READ_HWSP(dev_priv, 0);
+		I915_WRITE(I915REG_INSTPM, (1 << 5) | (1 << 21));
+		dev_priv->flush_pending = 1;
+		fm->pending_flush = 0;
+	}
+
+	if (dev_priv->flush_pending) {
+		i_status = READ_HWSP(dev_priv, 0);
+		if ((i_status & (1 << 12)) !=
+		    (dev_priv->saved_flush_status & (1 << 12))) {
+			flush_flags = dev_priv->flush_flags;
+			flush_sequence = dev_priv->flush_sequence;
+			dev_priv->flush_pending = 0;
+			drm_fence_handler(dev, flush_sequence, flush_flags);
+		}
+	}
+
+}
+
+void i915_poke_flush(drm_device_t * dev)
+{
+	drm_fence_manager_t *fm = &dev->fm;
+	unsigned long flags;
+
+	write_lock_irqsave(&fm->lock, flags);
+	i915_perform_flush(dev);
+	write_unlock_irqrestore(&fm->lock, flags);
+}
+
+int i915_fence_emit_sequence(drm_device_t * dev, uint32_t flags,
+			     uint32_t * sequence, uint32_t * native_type)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	if (!dev_priv)
+		return -EINVAL;
+
+	i915_emit_irq(dev);
+	*sequence = (uint32_t) dev_priv->counter;
+	*native_type = DRM_FENCE_TYPE_EXE;
+	if (flags & DRM_I915_FENCE_FLAG_FLUSHED)
+		*native_type |= DRM_I915_FENCE_TYPE_RW;
+
+	return 0;
+}
+
+void i915_fence_handler(drm_device_t * dev)
+{
+	drm_fence_manager_t *fm = &dev->fm;
+
+	write_lock(&fm->lock);
+	i915_perform_flush(dev);
+	write_unlock(&fm->lock);
+}
diff --git a/linux-core/imagine_drv.c b/linux-core/imagine_drv.c
index bec2fae..6d05099 100644
--- a/linux-core/imagine_drv.c
+++ b/linux-core/imagine_drv.c
@@ -22,7 +22,6 @@
 
 /* derived from tdfx_drv.c */
 
-#include <linux/config.h>
 #include "drmP.h"
 #include "imagine_drv.h"
 
diff --git a/linux-core/mach64_drv.c b/linux-core/mach64_drv.c
index ba45132..9709934 100644
--- a/linux-core/mach64_drv.c
+++ b/linux-core/mach64_drv.c
@@ -27,7 +27,6 @@
  *    Leif Delgass <ldelgass@retinalburn.net>
  */
 
-#include <linux/config.h>
 #include "drmP.h"
 #include "drm.h"
 #include "mach64_drm.h"
diff --git a/linux-core/mga_drv.c b/linux-core/mga_drv.c
index 3a1e4b2..ef6f1e4 100644
--- a/linux-core/mga_drv.c
+++ b/linux-core/mga_drv.c
@@ -29,7 +29,6 @@
  *    Gareth Hughes <gareth@valinux.com>
  */
 
-#include <linux/config.h>
 #include "drmP.h"
 #include "drm.h"
 #include "mga_drm.h"
@@ -49,6 +48,7 @@
 	    DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA |
 	    DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
 	    DRIVER_IRQ_VBL,
+	.dev_priv_size = sizeof (drm_mga_buf_priv_t),
 	.load = mga_driver_load,
 	.unload = mga_driver_unload,
 	.lastclose = mga_driver_lastclose,
diff --git a/linux-core/nv_drv.c b/linux-core/nv_drv.c
index a6afb02..5049473 100644
--- a/linux-core/nv_drv.c
+++ b/linux-core/nv_drv.c
@@ -32,7 +32,6 @@
  *    Lars Knoll <lars@trolltech.com>
  */
 
-#include <linux/config.h>
 #include "drmP.h"
 #include "nv_drv.h"
 
diff --git a/linux-core/r128_drv.c b/linux-core/r128_drv.c
index edc04b0..ef4a5cb 100644
--- a/linux-core/r128_drv.c
+++ b/linux-core/r128_drv.c
@@ -29,7 +29,6 @@
  *    Gareth Hughes <gareth@valinux.com>
  */
 
-#include <linux/config.h>
 #include "drmP.h"
 #include "drm.h"
 #include "r128_drm.h"
diff --git a/linux-core/radeon_drv.c b/linux-core/radeon_drv.c
index b15e983..43b9aca 100644
--- a/linux-core/radeon_drv.c
+++ b/linux-core/radeon_drv.c
@@ -29,7 +29,6 @@
  * OTHER DEALINGS IN THE SOFTWARE.
  */
 
-#include <linux/config.h>
 #include "drmP.h"
 #include "drm.h"
 #include "radeon_drm.h"
diff --git a/linux-core/savage_drv.c b/linux-core/savage_drv.c
index 9f12dfe..bb3561e 100644
--- a/linux-core/savage_drv.c
+++ b/linux-core/savage_drv.c
@@ -23,7 +23,6 @@
  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  */
 
-#include <linux/config.h>
 #include "drmP.h"
 #include "savage_drm.h"
 #include "savage_drv.h"
diff --git a/linux-core/sis_drv.c b/linux-core/sis_drv.c
index 36a525d..9b0b983 100644
--- a/linux-core/sis_drv.c
+++ b/linux-core/sis_drv.c
@@ -25,7 +25,6 @@
  *
  */
 
-#include <linux/config.h>
 #include "drmP.h"
 #include "sis_drm.h"
 #include "sis_drv.h"
diff --git a/linux-core/tdfx_drv.c b/linux-core/tdfx_drv.c
index ce1b7c5..bc69c06 100644
--- a/linux-core/tdfx_drv.c
+++ b/linux-core/tdfx_drv.c
@@ -30,7 +30,6 @@
  *    Gareth Hughes <gareth@valinux.com>
  */
 
-#include <linux/config.h>
 #include "drmP.h"
 #include "tdfx_drv.h"
 
diff --git a/shared-core/drm.h b/shared-core/drm.h
index 8c0c5d2..16e8626 100644
--- a/shared-core/drm.h
+++ b/shared-core/drm.h
@@ -69,9 +69,6 @@
 #endif
 
 #if defined(__linux__)
-#if defined(__KERNEL__)
-#include <linux/config.h>
-#endif
 #include <asm/ioctl.h>		/* For _IO* macros */
 #define DRM_IOCTL_NR(n)		_IOC_NR(n)
 #define DRM_IOC_VOID		_IOC_NONE
@@ -134,6 +131,12 @@
 #define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT))
 
 #if defined(__linux__)
+#if defined(__KERNEL__)
+typedef __u64 drm_u64_t;
+#else
+typedef unsigned long long drm_u64_t;
+#endif
+
 typedef unsigned int drm_handle_t;
 #else
 typedef unsigned long drm_handle_t;	/**< To mapped regions */
@@ -267,7 +270,8 @@
 	_DRM_SHM = 2,		  /**< shared, cached */
 	_DRM_AGP = 3,		  /**< AGP/GART */
 	_DRM_SCATTER_GATHER = 4,  /**< Scatter/gather memory for PCI DMA */
-	_DRM_CONSISTENT = 5	  /**< Consistent memory for PCI DMA */
+	_DRM_CONSISTENT = 5,	  /**< Consistent memory for PCI DMA */
+	_DRM_TTM = 6
 } drm_map_type_t;
 
 /**
@@ -656,6 +660,190 @@
 	int drm_dd_minor;
 } drm_set_version_t;
 
+
+#define DRM_FENCE_FLAG_EMIT                0x00000001
+#define DRM_FENCE_FLAG_SHAREABLE           0x00000002
+#define DRM_FENCE_FLAG_WAIT_LAZY           0x00000004
+#define DRM_FENCE_FLAG_WAIT_IGNORE_SIGNALS 0x00000008
+
+/* Reserved for driver use */
+#define DRM_FENCE_MASK_DRIVER              0xFF000000
+
+#define DRM_FENCE_TYPE_EXE                 0x00000001
+
+typedef struct drm_fence_arg {
+	unsigned handle;
+	int class;
+	unsigned type;
+	unsigned flags;
+	unsigned signaled;
+	unsigned expand_pad[4]; /*Future expansion */
+	enum {
+		drm_fence_create,
+		drm_fence_destroy,
+		drm_fence_reference,
+		drm_fence_unreference,
+		drm_fence_signaled,
+		drm_fence_flush,
+		drm_fence_wait,
+		drm_fence_emit,
+		drm_fence_buffers
+	} op;
+} drm_fence_arg_t;
+
+/* Buffer permissions, referring to how the GPU uses the buffers.
+   These translate to fence types used for the buffers.
+   Typically a texture buffer is read, a destination buffer is write and
+   a command (batch) buffer is exe. Can be or-ed together. */
+
+#define DRM_BO_FLAG_READ        0x00000001
+#define DRM_BO_FLAG_WRITE       0x00000002
+#define DRM_BO_FLAG_EXE         0x00000004
+
+/*
+ * Status flags. Can be read to determine the actual state of a buffer.
+ */
+
+/* 
+ * Cannot evict this buffer. Not even with force. This type of buffer should
+ * only be available for root, and must be manually removed before buffer
+ * manager shutdown or swapout.
+ */
+#define DRM_BO_FLAG_NO_EVICT    0x00000010
+/* Always keep a system memory shadow to a vram buffer */
+#define DRM_BO_FLAG_SHADOW_VRAM 0x00000020
+/* The buffer is shareable with other processes */
+#define DRM_BO_FLAG_SHAREABLE   0x00000040
+/* The buffer is currently cached */
+#define DRM_BO_FLAG_CACHED      0x00000080
+/* Make sure that every time this buffer is validated, it ends up in the same
+ * location. The buffer will also not be evicted when claiming space for
+ * other buffers. Basically a pinned buffer but it may be thrown out as
+ * part of buffer manager shutdown or swapout. Not supported yet.*/
+#define DRM_BO_FLAG_NO_MOVE     0x00000100
+
+/* Make sure the buffer is in cached memory when mapped for reading */
+#define DRM_BO_FLAG_READ_CACHED 0x00080000
+/* When there is a choice between VRAM and TT, prefer VRAM. 
+   The default behaviour is to prefer TT. */
+#define DRM_BO_FLAG_PREFER_VRAM 0x00040000
+/* Bind this buffer cached if the hardware supports it. */
+#define DRM_BO_FLAG_BIND_CACHED 0x0002000
+
+/* System Memory */
+#define DRM_BO_FLAG_MEM_LOCAL  0x01000000
+/* Translation table memory */
+#define DRM_BO_FLAG_MEM_TT     0x02000000
+/* Vram memory */
+#define DRM_BO_FLAG_MEM_VRAM   0x04000000
+/* Unmappable Vram memory */
+#define DRM_BO_FLAG_MEM_VRAM_NM   0x08000000
+/* Memory flag mask */
+#define DRM_BO_MASK_MEM         0xFF000000
+
+/* When creating a buffer, avoid system storage even if allowed */
+#define DRM_BO_HINT_AVOID_LOCAL 0x00000001
+/* Don't block on validate and map */
+#define DRM_BO_HINT_DONT_BLOCK  0x00000002
+/* Don't place this buffer on the unfenced list.*/
+#define DRM_BO_HINT_DONT_FENCE  0x00000004
+#define DRM_BO_HINT_WAIT_LAZY   0x00000008
+#define DRM_BO_HINT_ALLOW_UNFENCED_MAP 0x00000010
+
+
+/* Driver-specific flags. Could be, for example, a rendering engine. */
+#define DRM_BO_MASK_DRIVER      0x00F00000
+
+typedef enum {
+	drm_bo_type_dc,
+	drm_bo_type_user,
+	drm_bo_type_fake
+} drm_bo_type_t;
+	
+
+typedef struct drm_bo_arg_request {
+	unsigned handle; /* User space handle */
+	unsigned mask;
+	unsigned hint;
+	drm_u64_t size;
+	drm_bo_type_t type;
+	unsigned arg_handle;
+	drm_u64_t buffer_start;
+	unsigned page_alignment;
+	unsigned expand_pad[4]; /*Future expansion */
+	enum {
+		drm_bo_create,
+		drm_bo_validate,
+		drm_bo_map,
+		drm_bo_unmap,
+		drm_bo_fence,
+		drm_bo_destroy,
+		drm_bo_reference,
+		drm_bo_unreference,
+		drm_bo_info,
+		drm_bo_wait_idle,
+		drm_bo_ref_fence
+	} op;
+} drm_bo_arg_request_t;
+
+
+/*
+ * Reply flags
+ */
+
+#define DRM_BO_REP_BUSY 0x00000001
+
+typedef struct drm_bo_arg_reply {
+	int ret;
+	unsigned handle;
+	unsigned flags;
+	drm_u64_t size;
+	drm_u64_t offset;
+	drm_u64_t arg_handle;
+	unsigned mask;
+	drm_u64_t buffer_start;
+	unsigned fence_flags;
+	unsigned rep_flags;
+	unsigned page_alignment;
+	unsigned expand_pad[4]; /*Future expansion */
+} drm_bo_arg_reply_t;
+	
+
+typedef struct drm_bo_arg {
+	int handled;
+	drm_u64_t next;
+	union {
+		drm_bo_arg_request_t req;
+		drm_bo_arg_reply_t rep;
+	} d;
+} drm_bo_arg_t;
+
+#define DRM_BO_MEM_LOCAL 0
+#define DRM_BO_MEM_TT 1
+#define DRM_BO_MEM_VRAM 2
+#define DRM_BO_MEM_VRAM_NM 3
+#define DRM_BO_MEM_TYPES 2 /* For now. */
+
+typedef union drm_mm_init_arg{
+	struct {
+		enum {
+			mm_init,
+			mm_takedown,
+			mm_query,
+			mm_lock,
+			mm_unlock
+		} op;
+		drm_u64_t p_offset;
+		drm_u64_t p_size;
+		unsigned mem_type;
+		unsigned expand_pad[8]; /*Future expansion */
+	} req;
+	struct {
+		drm_handle_t mm_sarea;
+		unsigned expand_pad[8]; /*Future expansion */
+	} rep;
+} drm_mm_init_arg_t;
+
 /**
  * \name Ioctls Definitions
  */
@@ -721,17 +909,23 @@
 
 #define DRM_IOCTL_WAIT_VBLANK		DRM_IOWR(0x3a, drm_wait_vblank_t)
 
+#define DRM_IOCTL_FENCE                 DRM_IOWR(0x3b, drm_fence_arg_t)
+#define DRM_IOCTL_BUFOBJ                DRM_IOWR(0x3d, drm_bo_arg_t)
+#define DRM_IOCTL_MM_INIT               DRM_IOWR(0x3e, drm_mm_init_arg_t)
+
 #define DRM_IOCTL_UPDATE_DRAW           DRM_IOW(0x3f, drm_update_draw_t)
 
 /*@}*/
 
 /**
  * Device specific ioctls should only be in their respective headers
- * The device specific ioctl range is from 0x40 to 0x79.
+ * The device specific ioctl range is from 0x40 to 0x99.
+ * Generic IOCTLS restart at 0xA0.
  *
  * \sa drmCommandNone(), drmCommandRead(), drmCommandWrite(), and
  * drmCommandReadWrite().
  */
 #define DRM_COMMAND_BASE                0x40
+#define DRM_COMMAND_END                 0xA0
 
 #endif
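
The request half of drm_mm_init_arg_t selects the operation and describes the
range to manage; the reply half carries the result back. A hedged user-space
sketch of initializing the TT memory type (the fd comes from the usual drmOpen
path, and the offset/size values and their page units are assumptions, not
spelled out by this header):

#include <string.h>
#include <errno.h>
#include <sys/ioctl.h>
#include "drm.h"

static int example_mm_init(int fd)
{
	drm_mm_init_arg_t arg;

	memset(&arg, 0, sizeof(arg));
	arg.req.op = mm_init;
	arg.req.p_offset = 0;		/* start of the managed range */
	arg.req.p_size = 4096;		/* size of the range, assumed to be in pages */
	arg.req.mem_type = DRM_BO_MEM_TT;

	if (ioctl(fd, DRM_IOCTL_MM_INIT, &arg))
		return -errno;
	return 0;
}
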
diff --git a/shared-core/i915_dma.c b/shared-core/i915_dma.c
index be235c1..60e3e94 100644
--- a/shared-core/i915_dma.c
+++ b/shared-core/i915_dma.c
@@ -196,9 +196,10 @@
 
 	I915_WRITE(0x02080, dev_priv->dma_status_page);
 	DRM_DEBUG("Enabled hardware status page\n");
-
 	dev->dev_private = (void *)dev_priv;
-
+#ifdef I915_HAVE_BUFFER
+	drm_bo_driver_init(dev);
+#endif
 	return 0;
 }
 
@@ -435,17 +436,39 @@
 
 	dev_priv->sarea_priv->last_enqueue = ++dev_priv->counter;
 
-	if (dev_priv->counter > 0x7FFFFFFFUL)
-		dev_priv->sarea_priv->last_enqueue = dev_priv->counter = 1;
-
 	BEGIN_LP_RING(4);
 	OUT_RING(CMD_STORE_DWORD_IDX);
 	OUT_RING(20);
 	OUT_RING(dev_priv->counter);
 	OUT_RING(0);
 	ADVANCE_LP_RING();
+#ifdef I915_HAVE_FENCE
+	drm_fence_flush_old(dev, dev_priv->counter);
+#endif
 }
 
+
+int i915_emit_mi_flush(drm_device_t *dev, uint32_t flush)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	uint32_t flush_cmd = CMD_MI_FLUSH;
+	RING_LOCALS;
+
+	flush_cmd |= flush;
+
+	i915_kernel_lost_context(dev);
+
+	BEGIN_LP_RING(4);
+	OUT_RING(flush_cmd);
+	OUT_RING(0);
+	OUT_RING(0);
+	OUT_RING(0);
+	ADVANCE_LP_RING();
+
+	return 0;
+}
+
+
 static int i915_dispatch_cmdbuffer(drm_device_t * dev,
 				   drm_i915_cmdbuffer_t * cmd)
 {
@@ -566,7 +589,9 @@
 	OUT_RING(dev_priv->counter);
 	OUT_RING(0);
 	ADVANCE_LP_RING();
-
+#ifdef I915_HAVE_FENCE
+	drm_fence_flush_old(dev, dev_priv->counter);
+#endif
 	dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
 	return 0;
 }
@@ -680,6 +705,7 @@
 	return i915_dispatch_flip(dev);
 }
 
+
 static int i915_getparam(DRM_IOCTL_ARGS)
 {
 	DRM_DEVICE;
diff --git a/shared-core/i915_drm.h b/shared-core/i915_drm.h
index 358b11e..9eec109 100644
--- a/shared-core/i915_drm.h
+++ b/shared-core/i915_drm.h
@@ -115,6 +115,16 @@
 	int pipeB_h;
 } drm_i915_sarea_t;
 
+/* Driver specific fence types and classes.
+ */
+
+/* The only fence class we support */
+#define DRM_I915_FENCE_CLASS_ACCEL 0
+/* Fence type that guarantees read-write flush */
+#define DRM_I915_FENCE_TYPE_RW 2
+/* MI_FLUSH programmed just before the fence */
+#define DRM_I915_FENCE_FLAG_FLUSHED 0x01000000
+
 /* Flags for perf_boxes
  */
 #define I915_BOX_RING_EMPTY    0x1
diff --git a/shared-core/i915_drv.h b/shared-core/i915_drv.h
index de7f822..85804ce 100644
--- a/shared-core/i915_drv.h
+++ b/shared-core/i915_drv.h
@@ -35,9 +35,9 @@
 
 #define DRIVER_AUTHOR		"Tungsten Graphics, Inc."
 
-#define DRIVER_NAME		"i915"
+#define DRIVER_NAME		"i915-mm"
 #define DRIVER_DESC		"Intel Graphics"
-#define DRIVER_DATE		"20060119"
+#define DRIVER_DATE		"20060929"
 
 /* Interface history:
  *
@@ -50,9 +50,14 @@
  *      - Support vertical blank on secondary display pipe
  */
 #define DRIVER_MAJOR		1
-#define DRIVER_MINOR		6
+#define DRIVER_MINOR		7
 #define DRIVER_PATCHLEVEL	0
 
+#if defined(__linux__)
+#define I915_HAVE_FENCE
+#define I915_HAVE_BUFFER
+#endif
+
 typedef struct _drm_i915_ring_buffer {
 	int tail_mask;
 	unsigned long Start;
@@ -90,7 +95,7 @@
 	drm_dma_handle_t *status_page_dmah;
 	void *hw_status_page;
 	dma_addr_t dma_status_page;
-	unsigned long counter;
+	uint32_t counter;
 
 	unsigned int cpp;
 	int back_offset;
@@ -108,6 +113,18 @@
 	struct mem_block *agp_heap;
 	unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
 	int vblank_pipe;
+	spinlock_t user_irq_lock;
+	int user_irq_refcount;
+	int fence_irq_on;
+	uint32_t irq_enable_reg;
+	int irq_enabled;
+
+#ifdef I915_HAVE_FENCE
+	uint32_t flush_sequence;
+	uint32_t flush_flags;
+	uint32_t flush_pending;
+	uint32_t saved_flush_status;
+#endif
 
 	spinlock_t swaps_lock;
 	drm_i915_vbl_swap_t vbl_swaps;
@@ -125,6 +142,8 @@
 extern int i915_driver_device_is_agp(drm_device_t * dev);
 extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
 			      unsigned long arg);
+extern int i915_emit_mi_flush(drm_device_t *dev, uint32_t flush);
+
 
 /* i915_irq.c */
 extern int i915_irq_emit(DRM_IOCTL_ARGS);
@@ -138,6 +157,9 @@
 extern void i915_driver_irq_uninstall(drm_device_t * dev);
 extern int i915_vblank_pipe_set(DRM_IOCTL_ARGS);
 extern int i915_vblank_pipe_get(DRM_IOCTL_ARGS);
+extern int i915_emit_irq(drm_device_t * dev);
+extern void i915_user_irq_on(drm_i915_private_t *dev_priv);
+extern void i915_user_irq_off(drm_i915_private_t *dev_priv);
 extern int i915_vblank_swap(DRM_IOCTL_ARGS);
 
 /* i915_mem.c */
@@ -148,6 +170,23 @@
 extern void i915_mem_takedown(struct mem_block **heap);
 extern void i915_mem_release(drm_device_t * dev,
 			     DRMFILE filp, struct mem_block *heap);
+#ifdef I915_HAVE_FENCE
+/* i915_fence.c */
+
+
+extern void i915_fence_handler(drm_device_t *dev);
+extern int i915_fence_emit_sequence(drm_device_t *dev, uint32_t flags,
+				    uint32_t *sequence, 
+				    uint32_t *native_type);
+extern void i915_poke_flush(drm_device_t *dev);
+#endif
+
+#ifdef I915_HAVE_BUFFER
+/* i915_buffer.c */
+extern drm_ttm_backend_t *i915_create_ttm_backend_entry(drm_device_t *dev);
+extern int i915_fence_types(uint32_t buffer_flags, uint32_t *class, uint32_t *type);
+extern int i915_invalidate_caches(drm_device_t *dev, uint32_t buffer_flags);
+#endif
 
 #define I915_READ(reg)          DRM_READ32(dev_priv->mmio_map, (reg))
 #define I915_WRITE(reg,val)     DRM_WRITE32(dev_priv->mmio_map, (reg), (val))
@@ -198,6 +237,11 @@
 #define INST_OP_FLUSH        0x02000000
 #define INST_FLUSH_MAP_CACHE 0x00000001
 
+#define CMD_MI_FLUSH         (0x04 << 23)
+#define MI_NO_WRITE_FLUSH    (1 << 2)
+#define MI_READ_FLUSH        (1 << 0)
+#define MI_EXE_FLUSH         (1 << 1)
+
 #define BB1_START_ADDR_MASK   (~0x7)
 #define BB1_PROTECTED         (1<<0)
 #define BB1_UNPROTECTED       (0<<0)
@@ -207,6 +251,7 @@
 #define I915REG_INT_IDENTITY_R	0x020a4
 #define I915REG_INT_MASK_R 	0x020a8
 #define I915REG_INT_ENABLE_R	0x020a0
+#define I915REG_INSTPM	        0x020c0
 
 #define SRX_INDEX		0x3c4
 #define SRX_DATA		0x3c5
@@ -292,6 +337,6 @@
 
 #define CMD_OP_DESTBUFFER_INFO	 ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)
 
-#define READ_BREADCRUMB(dev_priv)  (((u32*)(dev_priv->hw_status_page))[5])
-
+#define READ_BREADCRUMB(dev_priv)  (((volatile u32*)(dev_priv->hw_status_page))[5])
+#define READ_HWSP(dev_priv, reg)  (((volatile u32*)(dev_priv->hw_status_page))[reg])
 #endif
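
The volatile qualifier added to READ_BREADCRUMB (and used by the new READ_HWSP) matters because the breadcrumb dword lives in the hardware status page and is written by the GPU; without it the compiler may hoist the load out of a polling loop and spin on a stale value. A small sketch of the kind of bounded poll that relies on it; i915_poll_breadcrumb() is a hypothetical helper, not something this patch adds:

	/* Hypothetical helper: spin briefly on the status-page breadcrumb
	 * before the caller falls back to sleeping on the user interrupt.
	 * The volatile cast inside READ_BREADCRUMB() forces a fresh load
	 * on every iteration. */
	static int i915_poll_breadcrumb(drm_i915_private_t *dev_priv,
					uint32_t seq, int spins)
	{
		while (spins--) {
			if (READ_BREADCRUMB(dev_priv) >= seq)
				return 1;	/* already signalled */
			DRM_UDELAY(1);
		}
		return 0;			/* not yet; wait for the IRQ */
	}
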
diff --git a/shared-core/i915_irq.c b/shared-core/i915_irq.c
index feb7acc..a48e1ff 100644
--- a/shared-core/i915_irq.c
+++ b/shared-core/i915_irq.c
@@ -138,10 +138,11 @@
 
 	temp = I915_READ16(I915REG_INT_IDENTITY_R);
 
-	temp &= (USER_INT_FLAG | VSYNC_PIPEA_FLAG | VSYNC_PIPEB_FLAG);
+	temp &= (dev_priv->irq_enable_reg | USER_INT_FLAG);
 
+#if 0
 	DRM_DEBUG("%s flag=%08x\n", __FUNCTION__, temp);
-
+#endif
 	if (temp == 0)
 		return IRQ_NONE;
 
@@ -149,8 +150,12 @@
 
 	dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
 
-	if (temp & USER_INT_FLAG)
+	if (temp & USER_INT_FLAG) {
 		DRM_WAKEUP(&dev_priv->irq_queue);
+#ifdef I915_HAVE_FENCE
+		i915_fence_handler(dev);
+#endif
+	}
 
 	if (temp & (VSYNC_PIPEA_FLAG | VSYNC_PIPEB_FLAG)) {
 		int vblank_pipe = dev_priv->vblank_pipe;
@@ -178,7 +183,7 @@
 	return IRQ_HANDLED;
 }
 
-static int i915_emit_irq(drm_device_t * dev)
+int i915_emit_irq(drm_device_t * dev)
 {
 	
 	drm_i915_private_t *dev_priv = dev->dev_private;
@@ -208,6 +213,28 @@
 
 }
 
+void i915_user_irq_on(drm_i915_private_t *dev_priv)
+{
+	spin_lock(&dev_priv->user_irq_lock);
+	if (dev_priv->irq_enabled && (++dev_priv->user_irq_refcount == 1)) {
+		dev_priv->irq_enable_reg |= USER_INT_FLAG;
+		I915_WRITE16(I915REG_INT_ENABLE_R, dev_priv->irq_enable_reg);
+	}
+	spin_unlock(&dev_priv->user_irq_lock);
+
+}
+
+void i915_user_irq_off(drm_i915_private_t *dev_priv)
+{
+	spin_lock(&dev_priv->user_irq_lock);
+	if (dev_priv->irq_enabled && (--dev_priv->user_irq_refcount == 0)) {
+		/* dev_priv->irq_enable_reg &= ~USER_INT_FLAG; */
+		/* I915_WRITE16(I915REG_INT_ENABLE_R, dev_priv->irq_enable_reg); */
+	}
+	spin_unlock(&dev_priv->user_irq_lock);
+}
+
+
 static int i915_wait_irq(drm_device_t * dev, int irq_nr)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -220,9 +247,11 @@
 		return 0;
 
 	dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
-
+
+	i915_user_irq_on(dev_priv);
 	DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
 		    READ_BREADCRUMB(dev_priv) >= irq_nr);
+	i915_user_irq_off(dev_priv);
 
 	if (ret == DRM_ERR(EBUSY)) {
 		DRM_ERROR("%s: EBUSY -- rec: %d emitted: %d\n",
@@ -316,15 +345,15 @@
 static void i915_enable_interrupt (drm_device_t *dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-	u16 flag;
 	
-	flag = 0;
+	dev_priv->irq_enable_reg = USER_INT_FLAG;
 	if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_A)
-		flag |= VSYNC_PIPEA_FLAG;
+		dev_priv->irq_enable_reg |= VSYNC_PIPEA_FLAG;
 	if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B)
-		flag |= VSYNC_PIPEB_FLAG;
+		dev_priv->irq_enable_reg |= VSYNC_PIPEB_FLAG;
 
-	I915_WRITE16(I915REG_INT_ENABLE_R, USER_INT_FLAG | flag);
+	I915_WRITE16(I915REG_INT_ENABLE_R, dev_priv->irq_enable_reg);
+	dev_priv->irq_enabled = 1;
 }
 
 /* Set the vblank monitor pipe
@@ -497,7 +526,7 @@
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 
-	I915_WRITE16(I915REG_HWSTAM, 0xfffe);
+	I915_WRITE16(I915REG_HWSTAM, 0xeffe);
 	I915_WRITE16(I915REG_INT_MASK_R, 0x0);
 	I915_WRITE16(I915REG_INT_ENABLE_R, 0x0);
 }
@@ -512,8 +541,22 @@
 
 	if (!dev_priv->vblank_pipe)
 		dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A;
+
+	spin_lock_init(&dev_priv->swaps_lock);
+	INIT_LIST_HEAD(&dev_priv->vbl_swaps.head);
+	dev_priv->swaps_pending = 0;
+
+	spin_lock_init(&dev_priv->user_irq_lock);
+	dev_priv->user_irq_refcount = 0;
+
 	i915_enable_interrupt(dev);
 	DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
+
+	/*
+	 * Initialize the hardware status page IRQ location.
+	 */
+
+	I915_WRITE(I915REG_INSTPM, (1 << 5) | (1 << 21));
 }
 
 void i915_driver_irq_uninstall(drm_device_t * dev)
@@ -523,6 +568,7 @@
 	if (!dev_priv)
 		return;
 
+	dev_priv->irq_enabled = 0;
 	I915_WRITE16(I915REG_HWSTAM, 0xffff);
 	I915_WRITE16(I915REG_INT_MASK_R, 0xffff);
 	I915_WRITE16(I915REG_INT_ENABLE_R, 0x0);
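
Pieced together, the i915_irq.c changes keep the USER interrupt armed only while someone is waiting on a breadcrumb: i915_emit_irq() (now exported) queues the breadcrumb store, and the refcounted i915_user_irq_on()/i915_user_irq_off() calls bracket the sleep, exactly as the reworked i915_wait_irq() above does. A consolidated sketch of that calling pattern for other wait paths such as the fence code; the function is illustrative, not part of the patch:

	/* Illustrative wait helper mirroring i915_wait_irq(): keep the user
	 * interrupt enabled for the duration of the sleep, then drop the
	 * refcount again.  'seq' is a sequence previously emitted with
	 * i915_emit_irq(). */
	static int i915_wait_sequence(drm_device_t *dev, uint32_t seq)
	{
		drm_i915_private_t *dev_priv = dev->dev_private;
		int ret = 0;

		i915_user_irq_on(dev_priv);
		DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
			    READ_BREADCRUMB(dev_priv) >= seq);
		i915_user_irq_off(dev_priv);

		return ret;
	}
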
diff --git a/shared-core/savage_bci.c b/shared-core/savage_bci.c
index 20fea40..01121b9 100644
--- a/shared-core/savage_bci.c
+++ b/shared-core/savage_bci.c
@@ -725,6 +725,7 @@
 		dev_priv->status = NULL;
 	}
 	if (dev_priv->dma_type == SAVAGE_DMA_AGP && init->buffers_offset) {
+		dev->agp_buffer_token = init->buffers_offset;
 		dev->agp_buffer_map = drm_core_findmap(dev,
 						       init->buffers_offset);
 		if (!dev->agp_buffer_map) {
diff --git a/shared-core/via_drv.c b/shared-core/via_drv.c
index bacfe37..33b0a42 100644
--- a/shared-core/via_drv.c
+++ b/shared-core/via_drv.c
@@ -22,7 +22,6 @@
  * DEALINGS IN THE SOFTWARE.
  */
 
-#include <linux/config.h>
 #include "drmP.h"
 #include "via_drm.h"
 #include "via_drv.h"