drm/vmwgfx: Initial DX support

Add initial support for DX contexts. A new vmw_res_dx_context resource
type is introduced with its own create/bind/unbind/destroy operations,
each DX context carries a set of cotable resources protected by a
spinlock, and the new vmw_extended_context_define_ioctl() lets
user-space choose between legacy and DX contexts at creation time.
The per-context binding tracking previously implemented in this file
moves to the new vmwgfx_binding code.

Co-authored with Sinclair Yeh, Charmaine Lee and Jakob Bornecrantz.

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Signed-off-by: Sinclair Yeh <syeh@vmware.com>
Signed-off-by: Charmaine Lee <charmainel@vmware.com>
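
A minimal sketch of the intended user-space usage follows. The
DRM_VMW_CREATE_EXTENDED_CONTEXT command index and the layout of
union drm_vmw_extended_context_arg are assumed from the uapi header
changes elsewhere in this series; only the drm_vmw_context_legacy and
drm_vmw_context_dx request values appear in this file:

	/* Hypothetical user-space example, not part of this patch. */
	#include <stdint.h>
	#include <string.h>
	#include <xf86drm.h>
	#include "vmwgfx_drm.h"	/* uapi header from this series */

	static int example_create_dx_context(int fd, int32_t *cid_out)
	{
		union drm_vmw_extended_context_arg arg;
		int ret;

		memset(&arg, 0, sizeof(arg));
		/* Request a DX context; drm_vmw_context_legacy also valid. */
		arg.req = drm_vmw_context_dx;

		ret = drmCommandWriteRead(fd, DRM_VMW_CREATE_EXTENDED_CONTEXT,
					  &arg, sizeof(arg));
		if (ret)
			return ret;	/* -EINVAL if the device lacks DX */

		/* Context id to use for subsequent command submission. */
		*cid_out = arg.rep.cid;
		return 0;
	}
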
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index 15f9544..abfe67c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -27,19 +27,18 @@
 
 #include "vmwgfx_drv.h"
 #include "vmwgfx_resource_priv.h"
+#include "vmwgfx_binding.h"
 #include "ttm/ttm_placement.h"
 
 struct vmw_user_context {
 	struct ttm_base_object base;
 	struct vmw_resource res;
-	struct vmw_ctx_binding_state cbs;
+	struct vmw_ctx_binding_state *cbs;
 	struct vmw_cmdbuf_res_manager *man;
+	struct vmw_resource *cotables[SVGA_COTABLE_DX10_MAX];
+	spinlock_t cotable_lock;
 };
 
-
-
-typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *, bool);
-
 static void vmw_user_context_free(struct vmw_resource *res);
 static struct vmw_resource *
 vmw_user_context_base_to_res(struct ttm_base_object *base);
@@ -51,12 +50,14 @@
 				 bool readback,
 				 struct ttm_validate_buffer *val_buf);
 static int vmw_gb_context_destroy(struct vmw_resource *res);
-static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind);
-static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi,
-					   bool rebind);
-static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi, bool rebind);
-static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs);
-static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs);
+static int vmw_dx_context_create(struct vmw_resource *res);
+static int vmw_dx_context_bind(struct vmw_resource *res,
+			       struct ttm_validate_buffer *val_buf);
+static int vmw_dx_context_unbind(struct vmw_resource *res,
+				 bool readback,
+				 struct ttm_validate_buffer *val_buf);
+static int vmw_dx_context_destroy(struct vmw_resource *res);
+
 static uint64_t vmw_user_context_size;
 
 static const struct vmw_user_resource_conv user_context_conv = {
@@ -93,15 +94,46 @@
 	.unbind = vmw_gb_context_unbind
 };
 
-static const vmw_scrub_func vmw_scrub_funcs[vmw_ctx_binding_max] = {
-	[vmw_ctx_binding_shader] = vmw_context_scrub_shader,
-	[vmw_ctx_binding_rt] = vmw_context_scrub_render_target,
-	[vmw_ctx_binding_tex] = vmw_context_scrub_texture };
+static const struct vmw_res_func vmw_dx_context_func = {
+	.res_type = vmw_res_dx_context,
+	.needs_backup = true,
+	.may_evict = true,
+	.type_name = "dx contexts",
+	.backup_placement = &vmw_mob_placement,
+	.create = vmw_dx_context_create,
+	.destroy = vmw_dx_context_destroy,
+	.bind = vmw_dx_context_bind,
+	.unbind = vmw_dx_context_unbind
+};
 
 /**
  * Context management:
  */
 
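+/**
+ * vmw_context_cotables_unref - Unreference a context's cotables
+ *
+ * @uctx: Pointer to the user context.
+ *
+ * Clears each cotable pointer under the cotable_lock before dropping the
+ * reference, so that lookups racing with destruction see either a valid
+ * resource or NULL.
+ */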
+static void vmw_context_cotables_unref(struct vmw_user_context *uctx)
+{
+	struct vmw_resource *res;
+	int i;
+
+	for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
+		spin_lock(&uctx->cotable_lock);
+		res = uctx->cotables[i];
+		uctx->cotables[i] = NULL;
+		spin_unlock(&uctx->cotable_lock);
+		if (res)
+			vmw_resource_unreference(&res);
+	}
+}
+
 static void vmw_hw_context_destroy(struct vmw_resource *res)
 {
 	struct vmw_user_context *uctx =
@@ -113,17 +135,19 @@
 	} *cmd;
 
 
-	if (res->func->destroy == vmw_gb_context_destroy) {
+	if (res->func->destroy == vmw_gb_context_destroy ||
+	    res->func->destroy == vmw_dx_context_destroy) {
 		mutex_lock(&dev_priv->cmdbuf_mutex);
 		vmw_cmdbuf_res_man_destroy(uctx->man);
 		mutex_lock(&dev_priv->binding_mutex);
-		(void) vmw_context_binding_state_kill(&uctx->cbs);
-		(void) vmw_gb_context_destroy(res);
+		vmw_binding_state_kill(uctx->cbs);
+		(void) res->func->destroy(res);
 		mutex_unlock(&dev_priv->binding_mutex);
 		if (dev_priv->pinned_bo != NULL &&
 		    !dev_priv->query_cid_valid)
 			__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
 		mutex_unlock(&dev_priv->cmdbuf_mutex);
+		vmw_context_cotables_unref(uctx);
 		return;
 	}
 
@@ -144,16 +168,20 @@
 }
 
 static int vmw_gb_context_init(struct vmw_private *dev_priv,
+			       bool dx,
 			       struct vmw_resource *res,
-			       void (*res_free) (struct vmw_resource *res))
+			       void (*res_free)(struct vmw_resource *res))
 {
-	int ret;
+	int ret, i;
 	struct vmw_user_context *uctx =
 		container_of(res, struct vmw_user_context, res);
 
+	res->backup_size = (dx ? sizeof(SVGADXContextMobFormat) :
+			    SVGA3D_CONTEXT_DATA_SIZE);
 	ret = vmw_resource_init(dev_priv, res, true,
-				res_free, &vmw_gb_context_func);
-	res->backup_size = SVGA3D_CONTEXT_DATA_SIZE;
+				res_free,
+				dx ? &vmw_dx_context_func :
+				&vmw_gb_context_func);
 	if (unlikely(ret != 0))
 		goto out_err;
 
@@ -166,12 +194,31 @@
 		}
 	}
 
-	memset(&uctx->cbs, 0, sizeof(uctx->cbs));
-	INIT_LIST_HEAD(&uctx->cbs.list);
+	uctx->cbs = vmw_binding_state_alloc(dev_priv);
+	if (IS_ERR(uctx->cbs)) {
+		ret = PTR_ERR(uctx->cbs);
+		uctx->cbs = NULL;
+		goto out_err;
+	}
+
+	spin_lock_init(&uctx->cotable_lock);
+
+	if (dx) {
+		for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
+			uctx->cotables[i] = vmw_cotable_alloc(dev_priv,
+							      &uctx->res, i);
+			if (unlikely(uctx->cotables[i] == NULL)) {
+				ret = -ENOMEM;
+				goto out_cotables;
+			}
+		}
+	}
 
 	vmw_resource_activate(res, vmw_hw_context_destroy);
 	return 0;
 
+out_cotables:
+	vmw_context_cotables_unref(uctx);
 out_err:
 	if (res_free)
 		res_free(res);
@@ -182,7 +230,8 @@
 
 static int vmw_context_init(struct vmw_private *dev_priv,
 			    struct vmw_resource *res,
-			    void (*res_free) (struct vmw_resource *res))
+			    void (*res_free)(struct vmw_resource *res),
+			    bool dx)
 {
 	int ret;
 
@@ -192,7 +241,7 @@
 	} *cmd;
 
 	if (dev_priv->has_mob)
-		return vmw_gb_context_init(dev_priv, res, res_free);
+		return vmw_gb_context_init(dev_priv, dx, res, res_free);
 
 	ret = vmw_resource_init(dev_priv, res, false,
 				res_free, &vmw_legacy_context_func);
@@ -232,19 +281,10 @@
 	return ret;
 }
 
-struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
-{
-	struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
-	int ret;
 
-	if (unlikely(res == NULL))
-		return NULL;
-
-	ret = vmw_context_init(dev_priv, res, NULL);
-
-	return (ret == 0) ? res : NULL;
-}
-
+/*
+ * GB context.
+ */
 
 static int vmw_gb_context_create(struct vmw_resource *res)
 {
@@ -309,7 +349,6 @@
 			  "binding.\n");
 		return -ENOMEM;
 	}
-
 	cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
 	cmd->header.size = sizeof(cmd->body);
 	cmd->body.cid = res->id;
@@ -346,7 +385,7 @@
 	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
 
 	mutex_lock(&dev_priv->binding_mutex);
-	vmw_context_binding_state_scrub(&uctx->cbs);
+	vmw_binding_state_scrub(uctx->cbs);
 
 	submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);
 
@@ -419,6 +458,242 @@
 	return 0;
 }
 
+/*
+ * DX context.
+ */
+
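+/**
+ * vmw_dx_context_create - Create the device part of a DX context
+ *
+ * @res: Pointer to the context resource.
+ */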
+static int vmw_dx_context_create(struct vmw_resource *res)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	int ret;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXDefineContext body;
+	} *cmd;
+
+	if (likely(res->id != -1))
+		return 0;
+
+	ret = vmw_resource_alloc_id(res);
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Failed to allocate a context id.\n");
+		goto out_no_id;
+	}
+
+	if (unlikely(res->id >= VMWGFX_NUM_DXCONTEXT)) {
+		ret = -EBUSY;
+		goto out_no_fifo;
+	}
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for context "
+			  "creation.\n");
+		ret = -ENOMEM;
+		goto out_no_fifo;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_DX_DEFINE_CONTEXT;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = res->id;
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	vmw_fifo_resource_inc(dev_priv);
+
+	return 0;
+
+out_no_fifo:
+	vmw_resource_release_id(res);
+out_no_id:
+	return ret;
+}
+
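+/**
+ * vmw_dx_context_bind - Bind a DX context to its backing MOB
+ *
+ * @res: Pointer to the context resource.
+ * @val_buf: Validation buffer holding the reserved backup buffer object.
+ */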
+static int vmw_dx_context_bind(struct vmw_resource *res,
+			       struct ttm_validate_buffer *val_buf)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXBindContext body;
+	} *cmd;
+	struct ttm_buffer_object *bo = val_buf->bo;
+
+	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for context "
+			  "binding.\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = res->id;
+	cmd->body.mobid = bo->mem.start;
+	cmd->body.validContents = res->backup_dirty;
+	res->backup_dirty = false;
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	return 0;
+}
+
+/**
+ * vmw_dx_context_scrub_cotables - Scrub all bindings and
+ * cotables from a context
+ *
+ * @ctx: Pointer to the context resource
+ * @readback: Whether to save the cotable contents on scrubbing.
+ *
+ * Cotables must be unbound before their context, but unbinding requires
+ * the backup buffer to be reserved, whereas scrubbing does not.
+ * This function scrubs all cotables of a context, potentially reading back
+ * the contents into their backup buffers. However, scrubbing cotables
+ * also invalidates the device context, so all bindings are scrubbed first
+ * to avoid having to do that later with an invalid context.
+ */
+void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
+				   bool readback)
+{
+	struct vmw_user_context *uctx =
+		container_of(ctx, struct vmw_user_context, res);
+	int i;
+
+	vmw_binding_state_scrub(uctx->cbs);
+	for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
+		struct vmw_resource *res;
+
+		/* Avoid racing with ongoing cotable destruction. */
+		spin_lock(&uctx->cotable_lock);
+		res = uctx->cotables[vmw_cotable_scrub_order[i]];
+		if (res)
+			res = vmw_resource_reference_unless_doomed(res);
+		spin_unlock(&uctx->cotable_lock);
+		if (!res)
+			continue;
+
+		WARN_ON(vmw_cotable_scrub(res, readback));
+		vmw_resource_unreference(&res);
+	}
+}
+
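+/**
+ * vmw_dx_context_unbind - Unbind a DX context from its backing MOB
+ *
+ * @res: Pointer to the context resource.
+ * @readback: Whether to issue a readback command before unbinding.
+ * @val_buf: Validation buffer holding the reserved backup buffer object.
+ */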
+static int vmw_dx_context_unbind(struct vmw_resource *res,
+				 bool readback,
+				 struct ttm_validate_buffer *val_buf)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct ttm_buffer_object *bo = val_buf->bo;
+	struct vmw_fence_obj *fence;
+
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXReadbackContext body;
+	} *cmd1;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXBindContext body;
+	} *cmd2;
+	uint32_t submit_size;
+	uint8_t *cmd;
+
+	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+
+	mutex_lock(&dev_priv->binding_mutex);
+	vmw_dx_context_scrub_cotables(res, readback);
+
+	submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);
+
+	cmd = vmw_fifo_reserve(dev_priv, submit_size);
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for context "
+			  "unbinding.\n");
+		mutex_unlock(&dev_priv->binding_mutex);
+		return -ENOMEM;
+	}
+
+	cmd2 = (void *) cmd;
+	if (readback) {
+		cmd1 = (void *) cmd;
+		cmd1->header.id = SVGA_3D_CMD_DX_READBACK_CONTEXT;
+		cmd1->header.size = sizeof(cmd1->body);
+		cmd1->body.cid = res->id;
+		cmd2 = (void *) (&cmd1[1]);
+	}
+	cmd2->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
+	cmd2->header.size = sizeof(cmd2->body);
+	cmd2->body.cid = res->id;
+	cmd2->body.mobid = SVGA3D_INVALID_ID;
+
+	vmw_fifo_commit(dev_priv, submit_size);
+	mutex_unlock(&dev_priv->binding_mutex);
+
+	/*
+	 * Create a fence object and fence the backup buffer.
+	 */
+
+	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
+					  &fence, NULL);
+
+	vmw_fence_single_bo(bo, fence);
+
+	if (likely(fence != NULL))
+		vmw_fence_obj_unreference(&fence);
+
+	return 0;
+}
+
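+/**
+ * vmw_dx_context_destroy - Destroy the device part of a DX context
+ *
+ * @res: Pointer to the context resource.
+ */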
+static int vmw_dx_context_destroy(struct vmw_resource *res)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXDestroyContext body;
+	} *cmd;
+
+	if (likely(res->id == -1))
+		return 0;
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for context "
+			  "destruction.\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_DX_DESTROY_CONTEXT;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = res->id;
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	if (dev_priv->query_cid == res->id)
+		dev_priv->query_cid_valid = false;
+	vmw_resource_release_id(res);
+	vmw_fifo_resource_dec(dev_priv);
+
+	return 0;
+}
+
 /**
  * User-space context management:
  */
@@ -435,6 +689,8 @@
 	    container_of(res, struct vmw_user_context, res);
 	struct vmw_private *dev_priv = res->dev_priv;
 
+	if (ctx->cbs)
+		vmw_binding_state_free(ctx->cbs);
 	ttm_base_object_kfree(ctx, base);
 	ttm_mem_global_free(vmw_mem_glob(dev_priv),
 			    vmw_user_context_size);
@@ -465,8 +721,8 @@
 	return ttm_ref_object_base_unref(tfile, arg->cid, TTM_REF_USAGE);
 }
 
-int vmw_context_define_ioctl(struct drm_device *dev, void *data,
-			     struct drm_file *file_priv)
+static int vmw_context_define(struct drm_device *dev, void *data,
+			      struct drm_file *file_priv, bool dx)
 {
 	struct vmw_private *dev_priv = vmw_priv(dev);
 	struct vmw_user_context *ctx;
@@ -476,6 +732,10 @@
 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
 	int ret;
 
+	if (!dev_priv->has_dx && dx) {
+		DRM_ERROR("DX contexts not supported by device.\n");
+		return -EINVAL;
+	}
 
 	/*
 	 * Approximate idr memory usage with 128 bytes. It will be limited
@@ -516,7 +776,7 @@
 	 * From here on, the destructor takes over resource freeing.
 	 */
 
-	ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
+	ret = vmw_context_init(dev_priv, res, vmw_user_context_free, dx);
 	if (unlikely(ret != 0))
 		goto out_unlock;
 
@@ -535,371 +795,29 @@
 out_unlock:
 	ttm_read_unlock(&dev_priv->reservation_sem);
 	return ret;
-
 }
 
-/**
- * vmw_context_scrub_shader - scrub a shader binding from a context.
- *
- * @bi: single binding information.
- * @rebind: Whether to issue a bind instead of scrub command.
- */
-static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
+int vmw_context_define_ioctl(struct drm_device *dev, void *data,
+			     struct drm_file *file_priv)
 {
-	struct vmw_private *dev_priv = bi->ctx->dev_priv;
-	struct {
-		SVGA3dCmdHeader header;
-		SVGA3dCmdSetShader body;
-	} *cmd;
-
-	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
-	if (unlikely(cmd == NULL)) {
-		DRM_ERROR("Failed reserving FIFO space for shader "
-			  "unbinding.\n");
-		return -ENOMEM;
-	}
-
-	cmd->header.id = SVGA_3D_CMD_SET_SHADER;
-	cmd->header.size = sizeof(cmd->body);
-	cmd->body.cid = bi->ctx->id;
-	cmd->body.type = bi->i1.shader_type;
-	cmd->body.shid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
-	vmw_fifo_commit(dev_priv, sizeof(*cmd));
-
-	return 0;
+	return vmw_context_define(dev, data, file_priv, false);
 }
 
-/**
- * vmw_context_scrub_render_target - scrub a render target binding
- * from a context.
- *
- * @bi: single binding information.
- * @rebind: Whether to issue a bind instead of scrub command.
- */
-static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi,
-					   bool rebind)
+int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data,
+				      struct drm_file *file_priv)
 {
-	struct vmw_private *dev_priv = bi->ctx->dev_priv;
-	struct {
-		SVGA3dCmdHeader header;
-		SVGA3dCmdSetRenderTarget body;
-	} *cmd;
+	union drm_vmw_extended_context_arg *arg = (typeof(arg)) data;
+	struct drm_vmw_context_arg *rep = &arg->rep;
 
-	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
-	if (unlikely(cmd == NULL)) {
-		DRM_ERROR("Failed reserving FIFO space for render target "
-			  "unbinding.\n");
-		return -ENOMEM;
-	}
-
-	cmd->header.id = SVGA_3D_CMD_SETRENDERTARGET;
-	cmd->header.size = sizeof(cmd->body);
-	cmd->body.cid = bi->ctx->id;
-	cmd->body.type = bi->i1.rt_type;
-	cmd->body.target.sid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
-	cmd->body.target.face = 0;
-	cmd->body.target.mipmap = 0;
-	vmw_fifo_commit(dev_priv, sizeof(*cmd));
-
-	return 0;
-}
-
-/**
- * vmw_context_scrub_texture - scrub a texture binding from a context.
- *
- * @bi: single binding information.
- * @rebind: Whether to issue a bind instead of scrub command.
- *
- * TODO: Possibly complement this function with a function that takes
- * a list of texture bindings and combines them to a single command.
- */
-static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi,
-				     bool rebind)
-{
-	struct vmw_private *dev_priv = bi->ctx->dev_priv;
-	struct {
-		SVGA3dCmdHeader header;
-		struct {
-			SVGA3dCmdSetTextureState c;
-			SVGA3dTextureState s1;
-		} body;
-	} *cmd;
-
-	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
-	if (unlikely(cmd == NULL)) {
-		DRM_ERROR("Failed reserving FIFO space for texture "
-			  "unbinding.\n");
-		return -ENOMEM;
-	}
-
-
-	cmd->header.id = SVGA_3D_CMD_SETTEXTURESTATE;
-	cmd->header.size = sizeof(cmd->body);
-	cmd->body.c.cid = bi->ctx->id;
-	cmd->body.s1.stage = bi->i1.texture_stage;
-	cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE;
-	cmd->body.s1.value = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
-	vmw_fifo_commit(dev_priv, sizeof(*cmd));
-
-	return 0;
-}
-
-/**
- * vmw_context_binding_drop: Stop tracking a context binding
- *
- * @cb: Pointer to binding tracker storage.
- *
- * Stops tracking a context binding, and re-initializes its storage.
- * Typically used when the context binding is replaced with a binding to
- * another (or the same, for that matter) resource.
- */
-static void vmw_context_binding_drop(struct vmw_ctx_binding *cb)
-{
-	list_del(&cb->ctx_list);
-	if (!list_empty(&cb->res_list))
-		list_del(&cb->res_list);
-	cb->bi.ctx = NULL;
-}
-
-/**
- * vmw_context_binding_add: Start tracking a context binding
- *
- * @cbs: Pointer to the context binding state tracker.
- * @bi: Information about the binding to track.
- *
- * Performs basic checks on the binding to make sure arguments are within
- * bounds and then starts tracking the binding in the context binding
- * state structure @cbs.
- */
-int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs,
-			    const struct vmw_ctx_bindinfo *bi)
-{
-	struct vmw_ctx_binding *loc;
-
-	switch (bi->bt) {
-	case vmw_ctx_binding_rt:
-		if (unlikely((unsigned)bi->i1.rt_type >= SVGA3D_RT_MAX)) {
-			DRM_ERROR("Illegal render target type %u.\n",
-				  (unsigned) bi->i1.rt_type);
-			return -EINVAL;
-		}
-		loc = &cbs->render_targets[bi->i1.rt_type];
-		break;
-	case vmw_ctx_binding_tex:
-		if (unlikely((unsigned)bi->i1.texture_stage >=
-			     SVGA3D_NUM_TEXTURE_UNITS)) {
-			DRM_ERROR("Illegal texture/sampler unit %u.\n",
-				  (unsigned) bi->i1.texture_stage);
-			return -EINVAL;
-		}
-		loc = &cbs->texture_units[bi->i1.texture_stage];
-		break;
-	case vmw_ctx_binding_shader:
-		if (unlikely((unsigned)bi->i1.shader_type >=
-			     SVGA3D_SHADERTYPE_PREDX_MAX)) {
-			DRM_ERROR("Illegal shader type %u.\n",
-				  (unsigned) bi->i1.shader_type);
-			return -EINVAL;
-		}
-		loc = &cbs->shaders[bi->i1.shader_type];
-		break;
+	switch (arg->req) {
+	case drm_vmw_context_legacy:
+		return vmw_context_define(dev, rep, file_priv, false);
+	case drm_vmw_context_dx:
+		return vmw_context_define(dev, rep, file_priv, true);
 	default:
-		BUG();
-	}
-
-	if (loc->bi.ctx != NULL)
-		vmw_context_binding_drop(loc);
-
-	loc->bi = *bi;
-	loc->bi.scrubbed = false;
-	list_add_tail(&loc->ctx_list, &cbs->list);
-	INIT_LIST_HEAD(&loc->res_list);
-
-	return 0;
-}
-
-/**
- * vmw_context_binding_transfer: Transfer a context binding tracking entry.
- *
- * @cbs: Pointer to the persistent context binding state tracker.
- * @bi: Information about the binding to track.
- *
- */
-static void vmw_context_binding_transfer(struct vmw_ctx_binding_state *cbs,
-					 const struct vmw_ctx_bindinfo *bi)
-{
-	struct vmw_ctx_binding *loc;
-
-	switch (bi->bt) {
-	case vmw_ctx_binding_rt:
-		loc = &cbs->render_targets[bi->i1.rt_type];
 		break;
-	case vmw_ctx_binding_tex:
-		loc = &cbs->texture_units[bi->i1.texture_stage];
-		break;
-	case vmw_ctx_binding_shader:
-		loc = &cbs->shaders[bi->i1.shader_type];
-		break;
-	default:
-		BUG();
 	}
-
-	if (loc->bi.ctx != NULL)
-		vmw_context_binding_drop(loc);
-
-	if (bi->res != NULL) {
-		loc->bi = *bi;
-		list_add_tail(&loc->ctx_list, &cbs->list);
-		list_add_tail(&loc->res_list, &bi->res->binding_head);
-	}
-}
-
-/**
- * vmw_context_binding_kill - Kill a binding on the device
- * and stop tracking it.
- *
- * @cb: Pointer to binding tracker storage.
- *
- * Emits FIFO commands to scrub a binding represented by @cb.
- * Then stops tracking the binding and re-initializes its storage.
- */
-static void vmw_context_binding_kill(struct vmw_ctx_binding *cb)
-{
-	if (!cb->bi.scrubbed) {
-		(void) vmw_scrub_funcs[cb->bi.bt](&cb->bi, false);
-		cb->bi.scrubbed = true;
-	}
-	vmw_context_binding_drop(cb);
-}
-
-/**
- * vmw_context_binding_state_kill - Kill all bindings associated with a
- * struct vmw_ctx_binding state structure, and re-initialize the structure.
- *
- * @cbs: Pointer to the context binding state tracker.
- *
- * Emits commands to scrub all bindings associated with the
- * context binding state tracker. Then re-initializes the whole structure.
- */
-static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs)
-{
-	struct vmw_ctx_binding *entry, *next;
-
-	list_for_each_entry_safe(entry, next, &cbs->list, ctx_list)
-		vmw_context_binding_kill(entry);
-}
-
-/**
- * vmw_context_binding_state_scrub - Scrub all bindings associated with a
- * struct vmw_ctx_binding state structure.
- *
- * @cbs: Pointer to the context binding state tracker.
- *
- * Emits commands to scrub all bindings associated with the
- * context binding state tracker.
- */
-static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs)
-{
-	struct vmw_ctx_binding *entry;
-
-	list_for_each_entry(entry, &cbs->list, ctx_list) {
-		if (!entry->bi.scrubbed) {
-			(void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false);
-			entry->bi.scrubbed = true;
-		}
-	}
-}
-
-/**
- * vmw_context_binding_res_list_kill - Kill all bindings on a
- * resource binding list
- *
- * @head: list head of resource binding list
- *
- * Kills all bindings associated with a specific resource. Typically
- * called before the resource is destroyed.
- */
-void vmw_context_binding_res_list_kill(struct list_head *head)
-{
-	struct vmw_ctx_binding *entry, *next;
-
-	list_for_each_entry_safe(entry, next, head, res_list)
-		vmw_context_binding_kill(entry);
-}
-
-/**
- * vmw_context_binding_res_list_scrub - Scrub all bindings on a
- * resource binding list
- *
- * @head: list head of resource binding list
- *
- * Scrub all bindings associated with a specific resource. Typically
- * called before the resource is evicted.
- */
-void vmw_context_binding_res_list_scrub(struct list_head *head)
-{
-	struct vmw_ctx_binding *entry;
-
-	list_for_each_entry(entry, head, res_list) {
-		if (!entry->bi.scrubbed) {
-			(void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false);
-			entry->bi.scrubbed = true;
-		}
-	}
-}
-
-/**
- * vmw_context_binding_state_transfer - Commit staged binding info
- *
- * @ctx: Pointer to context to commit the staged binding info to.
- * @from: Staged binding info built during execbuf.
- *
- * Transfers binding info from a temporary structure to the persistent
- * structure in the context. This can be done once commands
- */
-void vmw_context_binding_state_transfer(struct vmw_resource *ctx,
-					struct vmw_ctx_binding_state *from)
-{
-	struct vmw_user_context *uctx =
-		container_of(ctx, struct vmw_user_context, res);
-	struct vmw_ctx_binding *entry, *next;
-
-	list_for_each_entry_safe(entry, next, &from->list, ctx_list)
-		vmw_context_binding_transfer(&uctx->cbs, &entry->bi);
-}
-
-/**
- * vmw_context_rebind_all - Rebind all scrubbed bindings of a context
- *
- * @ctx: The context resource
- *
- * Walks through the context binding list and rebinds all scrubbed
- * resources.
- */
-int vmw_context_rebind_all(struct vmw_resource *ctx)
-{
-	struct vmw_ctx_binding *entry;
-	struct vmw_user_context *uctx =
-		container_of(ctx, struct vmw_user_context, res);
-	struct vmw_ctx_binding_state *cbs = &uctx->cbs;
-	int ret;
-
-	list_for_each_entry(entry, &cbs->list, ctx_list) {
-		if (likely(!entry->bi.scrubbed))
-			continue;
-
-		if (WARN_ON(entry->bi.res == NULL || entry->bi.res->id ==
-			    SVGA3D_INVALID_ID))
-			continue;
-
-		ret = vmw_scrub_funcs[entry->bi.bt](&entry->bi, true);
-		if (unlikely(ret != 0))
-			return ret;
-
-		entry->bi.scrubbed = false;
-	}
-
-	return 0;
+	return -EINVAL;
 }
 
 /**
@@ -912,10 +830,48 @@
  */
 struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
 {
-	return &(container_of(ctx, struct vmw_user_context, res)->cbs.list);
+	struct vmw_user_context *uctx =
+		container_of(ctx, struct vmw_user_context, res);
+
+	return vmw_binding_state_list(uctx->cbs);
 }
 
 struct vmw_cmdbuf_res_manager *vmw_context_res_man(struct vmw_resource *ctx)
 {
 	return container_of(ctx, struct vmw_user_context, res)->man;
 }
+
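+/**
+ * vmw_context_cotable - Look up a context's cotable of a given type
+ *
+ * @ctx: The context resource.
+ * @cotable_type: The cotable type to look up.
+ *
+ * Returns a referenced cotable resource, or ERR_PTR(-EINVAL) if
+ * @cotable_type is out of range. The caller must unreference the result.
+ */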
+struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
+					 SVGACOTableType cotable_type)
+{
+	if (cotable_type >= SVGA_COTABLE_DX10_MAX)
+		return ERR_PTR(-EINVAL);
+
+	return vmw_resource_reference
+		(container_of(ctx, struct vmw_user_context, res)->
+		 cotables[cotable_type]);
+}
+
+/**
+ * vmw_context_binding_state -
+ * Return a pointer to a context binding state structure
+ *
+ * @ctx: The context resource
+ *
+ * Returns the current state of bindings of the given context. Note that
+ * this state becomes stale as soon as the dev_priv::binding_mutex is unlocked.
+ */
+struct vmw_ctx_binding_state *
+vmw_context_binding_state(struct vmw_resource *ctx)
+{
+	return container_of(ctx, struct vmw_user_context, res)->cbs;
+}