Merge branch 'drm-vmware-next' into drm-core-next

* drm-vmware-next:
  drm/vmwgfx: Bump driver minor
  vmwgfx: Move function declaration to correct header
  drm/vmwgfx: Treat out-of-range initial width and height as host errors
  vmwgfx: Pick up the initial size from the width and height regs
  vmwgfx: Add page flip support
  vmwgfx: Pipe fence out of screen object dirty functions
  vmwgfx: Make it possible to get fence from execbuf
  vmwgfx: Clean up pending event references to struct drm_file objects on close
  vmwgfx: Rework fence event action
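
For context, fence completion events queued by this series are delivered
through the ordinary DRM event stream. A minimal consumer sketch
(illustrative only; drm_fd and handle_fence_signaled() are hypothetical,
includes and error handling are omitted, and the struct layouts are the
ones this series uses in vmwgfx_drm.h):

	char buf[1024];
	ssize_t len, i = 0;

	/* Blocks until at least one event is ready on the DRM fd. */
	len = read(drm_fd, buf, sizeof(buf));

	while (i < len) {
		struct drm_event *e = (struct drm_event *)&buf[i];

		if (e->type == DRM_VMW_EVENT_FENCE_SIGNALED) {
			struct drm_vmw_event_fence *fe =
				(struct drm_vmw_event_fence *)e;

			/* user_data round-trips from the ioctl; tv_sec and
			 * tv_usec are only filled in when
			 * DRM_VMW_FE_FLAG_REQ_TIME was requested. */
			handle_fence_signaled(fe->user_data,
					      fe->tv_sec, fe->tv_usec);
		}
		i += e->length;
	}
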
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index f390f5f..f076f66 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -38,6 +38,9 @@
 #define VMWGFX_CHIP_SVGAII 0
 #define VMW_FB_RESERVATION 0
 
+#define VMW_MIN_INITIAL_WIDTH 800
+#define VMW_MIN_INITIAL_HEIGHT 600
+
 /**
  * Fully encoded drm commands. Might move to vmw_drm.h
  */
@@ -387,6 +391,41 @@
 	BUG_ON(n3d < 0);
 }
 
+/**
+ * vmw_get_initial_size - Set the initial_[width|height] fields on @dev_priv
+ * @dev_priv: Pointer to a struct vmw_private
+ *
+ * Reads the SVGA_REG_[WIDTH|HEIGHT] registers, raises the values to at
+ * least VMW_MIN_INITIAL_[WIDTH|HEIGHT], and falls back to those minima
+ * if the result would exceed fb_max_[width|height], which is treated as
+ * a host error.
+ */
+static void vmw_get_initial_size(struct vmw_private *dev_priv)
+{
+	uint32_t width;
+	uint32_t height;
+
+	width = vmw_read(dev_priv, SVGA_REG_WIDTH);
+	height = vmw_read(dev_priv, SVGA_REG_HEIGHT);
+
+	width = max_t(uint32_t, width, VMW_MIN_INITIAL_WIDTH);
+	height = max_t(uint32_t, height, VMW_MIN_INITIAL_HEIGHT);
+
+	if (width > dev_priv->fb_max_width ||
+	    height > dev_priv->fb_max_height) {
+
+		/*
+		 * This is a host error and shouldn't occur.
+		 */
+
+		width = VMW_MIN_INITIAL_WIDTH;
+		height = VMW_MIN_INITIAL_HEIGHT;
+	}
+
+	dev_priv->initial_width = width;
+	dev_priv->initial_height = height;
+}
+
 static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 {
 	struct vmw_private *dev_priv;
@@ -441,6 +480,9 @@
 	dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
 	dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
 	dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);
+
+	vmw_get_initial_size(dev_priv);
+
 	if (dev_priv->capabilities & SVGA_CAP_GMR) {
 		dev_priv->max_gmr_descriptors =
 			vmw_read(dev_priv,
@@ -688,6 +730,15 @@
 	return 0;
 }
 
+static void vmw_preclose(struct drm_device *dev,
+			 struct drm_file *file_priv)
+{
+	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
+	struct vmw_private *dev_priv = vmw_priv(dev);
+
+	vmw_event_fence_fpriv_gone(dev_priv->fman, &vmw_fp->fence_events);
+}
+
 static void vmw_postclose(struct drm_device *dev,
 			 struct drm_file *file_priv)
 {
@@ -710,6 +761,7 @@
 	if (unlikely(vmw_fp == NULL))
 		return ret;
 
+	INIT_LIST_HEAD(&vmw_fp->fence_events);
 	vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
 	if (unlikely(vmw_fp->tfile == NULL))
 		goto out_no_tfile;
@@ -1102,6 +1154,7 @@
 	.master_set = vmw_master_set,
 	.master_drop = vmw_master_drop,
 	.open = vmw_driver_open,
+	.preclose = vmw_preclose,
 	.postclose = vmw_postclose,
 	.fops = &vmwgfx_driver_fops,
 	.name = VMWGFX_DRIVER_NAME,
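
To make the new clamping policy concrete, here is a standalone sketch of
vmw_get_initial_size()'s decision table (illustrative userspace C, not
driver code; the fb_max_* values of 1920x1200 are made up):

#include <assert.h>

#define VMW_MIN_INITIAL_WIDTH 800
#define VMW_MIN_INITIAL_HEIGHT 600

static void initial_size(unsigned int reg_w, unsigned int reg_h,
			 unsigned int max_w, unsigned int max_h,
			 unsigned int *out_w, unsigned int *out_h)
{
	/* Raise to the minimum, then treat anything beyond the
	 * framebuffer maximum as a host error. */
	unsigned int w = reg_w > VMW_MIN_INITIAL_WIDTH ?
			 reg_w : VMW_MIN_INITIAL_WIDTH;
	unsigned int h = reg_h > VMW_MIN_INITIAL_HEIGHT ?
			 reg_h : VMW_MIN_INITIAL_HEIGHT;

	if (w > max_w || h > max_h) {
		w = VMW_MIN_INITIAL_WIDTH;
		h = VMW_MIN_INITIAL_HEIGHT;
	}
	*out_w = w;
	*out_h = h;
}

int main(void)
{
	unsigned int w, h;

	initial_size(640, 480, 1920, 1200, &w, &h);	/* raised to the floor */
	assert(w == 800 && h == 600);

	initial_size(1280, 720, 1920, 1200, &w, &h);	/* passes through */
	assert(w == 1280 && h == 720);

	initial_size(4096, 4096, 1920, 1200, &w, &h);	/* host error, fall back */
	assert(w == 800 && h == 600);

	return 0;
}
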
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index dc27970..d0f2c07 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -40,9 +40,9 @@
 #include "ttm/ttm_module.h"
 #include "vmwgfx_fence.h"
 
-#define VMWGFX_DRIVER_DATE "20111025"
+#define VMWGFX_DRIVER_DATE "20120209"
 #define VMWGFX_DRIVER_MAJOR 2
-#define VMWGFX_DRIVER_MINOR 3
+#define VMWGFX_DRIVER_MINOR 4
 #define VMWGFX_DRIVER_PATCHLEVEL 0
 #define VMWGFX_FILE_PAGE_OFFSET 0x00100000
 #define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
@@ -62,6 +62,7 @@
 struct vmw_fpriv {
 	struct drm_master *locked_master;
 	struct ttm_object_file *tfile;
+	struct list_head fence_events;
 };
 
 struct vmw_dma_buffer {
@@ -202,6 +203,8 @@
 	uint32_t mmio_size;
 	uint32_t fb_max_width;
 	uint32_t fb_max_height;
+	uint32_t initial_width;
+	uint32_t initial_height;
 	__le32 __iomem *mmio_virt;
 	int mmio_mtrr;
 	uint32_t capabilities;
@@ -533,7 +536,8 @@
 			       uint32_t command_size,
 			       uint64_t throttle_us,
 			       struct drm_vmw_fence_rep __user
-			       *user_fence_rep);
+			       *user_fence_rep,
+			       struct vmw_fence_obj **out_fence);
 
 extern void
 vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 40932fb..4acced4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -1109,10 +1109,11 @@
 			void *kernel_commands,
 			uint32_t command_size,
 			uint64_t throttle_us,
-			struct drm_vmw_fence_rep __user *user_fence_rep)
+			struct drm_vmw_fence_rep __user *user_fence_rep,
+			struct vmw_fence_obj **out_fence)
 {
 	struct vmw_sw_context *sw_context = &dev_priv->ctx;
-	struct vmw_fence_obj *fence;
+	struct vmw_fence_obj *fence = NULL;
 	uint32_t handle;
 	void *cmd;
 	int ret;
@@ -1208,8 +1209,13 @@
 	vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
 				    user_fence_rep, fence, handle);
 
-	if (likely(fence != NULL))
+	/* Don't unreference when handing fence out */
+	if (unlikely(out_fence != NULL)) {
+		*out_fence = fence;
+		fence = NULL;
+	} else if (likely(fence != NULL)) {
 		vmw_fence_obj_unreference(&fence);
+	}
 
 	mutex_unlock(&dev_priv->cmdbuf_mutex);
 	return 0;
@@ -1362,7 +1368,8 @@
 	ret = vmw_execbuf_process(file_priv, dev_priv,
 				  (void __user *)(unsigned long)arg->commands,
 				  NULL, arg->command_size, arg->throttle_us,
-				  (void __user *)(unsigned long)arg->fence_rep);
+				  (void __user *)(unsigned long)arg->fence_rep,
+				  NULL);
 
 	if (unlikely(ret != 0))
 		goto out_unlock;
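
Note on ownership: when out_fence is non-NULL, vmw_execbuf_process()
hands its fence reference to the caller instead of dropping it. A
minimal caller sketch (surrounding variables assumed; vmw_du_page_flip
in vmwgfx_kms.c below follows this exact pattern):

	struct vmw_fence_obj *fence = NULL;
	int ret;

	ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
				  fifo_size, 0, NULL, &fence);
	if (ret == 0 && fence != NULL) {
		/* ... wait on or attach actions to the fence ... */

		/* The reference is now ours to drop. */
		vmw_fence_obj_unreference(&fence);
	}
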
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 67f1d54..3c447bf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -414,10 +414,6 @@
 	unsigned fb_bpp, fb_depth, fb_offset, fb_pitch, fb_size;
 	int ret;
 
-	/* XXX These shouldn't be hardcoded. */
-	initial_width = 800;
-	initial_height = 600;
-
 	fb_bpp = 32;
 	fb_depth = 24;
 
@@ -425,8 +421,8 @@
 	fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
 	fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);
 
-	initial_width = min(fb_width, initial_width);
-	initial_height = min(fb_height, initial_height);
+	initial_width = min(vmw_priv->initial_width, fb_width);
+	initial_height = min(vmw_priv->initial_height, fb_height);
 
 	fb_pitch = fb_width * fb_bpp / 8;
 	fb_size = fb_pitch * fb_height;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 15fb260..f2fb8f1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -69,12 +69,13 @@
  * be assigned the current time tv_usec val when the fence signals.
  */
 struct vmw_event_fence_action {
-	struct drm_pending_event e;
 	struct vmw_fence_action action;
+	struct list_head fpriv_head;
+
+	struct drm_pending_event *event;
 	struct vmw_fence_obj *fence;
 	struct drm_device *dev;
-	struct kref kref;
-	uint32_t size;
+
 	uint32_t *tv_sec;
 	uint32_t *tv_usec;
 };
@@ -784,46 +785,40 @@
 }
 
 /**
- * vmw_event_fence_action_destroy
+ * vmw_event_fence_fpriv_gone - Remove references to struct drm_file objects
  *
- * @kref: The struct kref embedded in a struct vmw_event_fence_action.
+ * @fman: Pointer to a struct vmw_fence_manager
+ * @event_list: Pointer to linked list of struct vmw_event_fence_action objects
+ * with pointers to a struct drm_file object about to be closed.
  *
- * The vmw_event_fence_action destructor that may be called either after
- * the fence action cleanup, or when the event is delivered.
- * It frees both the vmw_event_fence_action struct and the actual
- * event structure copied to user-space.
+ * This function removes all pending fence events with references to a
+ * specific struct drm_file object about to be closed. The caller is required
+ * to pass a list of all struct vmw_event_fence_action objects with such
+ * events attached. This function is typically called before the
+ * struct drm_file object's event management is taken down.
  */
-static void vmw_event_fence_action_destroy(struct kref *kref)
+void vmw_event_fence_fpriv_gone(struct vmw_fence_manager *fman,
+				struct list_head *event_list)
 {
-	struct vmw_event_fence_action *eaction =
-		container_of(kref, struct vmw_event_fence_action, kref);
-	struct ttm_mem_global *mem_glob =
-		vmw_mem_glob(vmw_priv(eaction->dev));
-	uint32_t size = eaction->size;
+	struct vmw_event_fence_action *eaction;
+	struct drm_pending_event *event;
+	unsigned long irq_flags;
 
-	kfree(eaction->e.event);
-	kfree(eaction);
-	ttm_mem_global_free(mem_glob, size);
-}
-
-
-/**
- * vmw_event_fence_action_delivered
- *
- * @e: The struct drm_pending_event embedded in a struct
- * vmw_event_fence_action.
- *
- * The struct drm_pending_event destructor that is called by drm
- * once the event is delivered. Since we don't know whether this function
- * will be called before or after the fence action destructor, we
- * free a refcount and destroy if it becomes zero.
- */
-static void vmw_event_fence_action_delivered(struct drm_pending_event *e)
-{
-	struct vmw_event_fence_action *eaction =
-		container_of(e, struct vmw_event_fence_action, e);
-
-	kref_put(&eaction->kref, vmw_event_fence_action_destroy);
+	while (1) {
+		spin_lock_irqsave(&fman->lock, irq_flags);
+		if (list_empty(event_list))
+			goto out_unlock;
+		eaction = list_first_entry(event_list,
+					   struct vmw_event_fence_action,
+					   fpriv_head);
+		list_del_init(&eaction->fpriv_head);
+		event = eaction->event;
+		eaction->event = NULL;
+		spin_unlock_irqrestore(&fman->lock, irq_flags);
+		event->destroy(event);
+	}
+out_unlock:
+	spin_unlock_irqrestore(&fman->lock, irq_flags);
 }
 
 
@@ -836,18 +831,21 @@
  * This function is called when the seqno of the fence where @action is
  * attached has passed. It queues the event on the submitter's event list.
  * This function is always called from atomic context, and may be called
- * from irq context. It ups a refcount reflecting that we now have two
- * destructors.
+ * from irq context.
  */
 static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
 {
 	struct vmw_event_fence_action *eaction =
 		container_of(action, struct vmw_event_fence_action, action);
 	struct drm_device *dev = eaction->dev;
-	struct drm_file *file_priv = eaction->e.file_priv;
+	struct drm_pending_event *event = eaction->event;
+	struct drm_file *file_priv;
 	unsigned long irq_flags;
 
-	kref_get(&eaction->kref);
+	if (unlikely(event == NULL))
+		return;
+
+	file_priv = event->file_priv;
 	spin_lock_irqsave(&dev->event_lock, irq_flags);
 
 	if (likely(eaction->tv_sec != NULL)) {
@@ -858,7 +856,9 @@
 		*eaction->tv_usec = tv.tv_usec;
 	}
 
-	list_add_tail(&eaction->e.link, &file_priv->event_list);
+	list_del_init(&eaction->fpriv_head);
+	list_add_tail(&eaction->event->link, &file_priv->event_list);
+	eaction->event = NULL;
 	wake_up_all(&file_priv->event_wait);
 	spin_unlock_irqrestore(&dev->event_lock, irq_flags);
 }
@@ -876,9 +876,15 @@
 {
 	struct vmw_event_fence_action *eaction =
 		container_of(action, struct vmw_event_fence_action, action);
+	struct vmw_fence_manager *fman = eaction->fence->fman;
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&fman->lock, irq_flags);
+	list_del(&eaction->fpriv_head);
+	spin_unlock_irqrestore(&fman->lock, irq_flags);
 
 	vmw_fence_obj_unreference(&eaction->fence);
-	kref_put(&eaction->kref, vmw_event_fence_action_destroy);
+	kfree(eaction);
 }
 
 
@@ -946,39 +952,23 @@
  * an error code, the caller needs to free that object.
  */
 
-int vmw_event_fence_action_create(struct drm_file *file_priv,
-				  struct vmw_fence_obj *fence,
-				  struct drm_event *event,
-				  uint32_t *tv_sec,
-				  uint32_t *tv_usec,
-				  bool interruptible)
+int vmw_event_fence_action_queue(struct drm_file *file_priv,
+				 struct vmw_fence_obj *fence,
+				 struct drm_pending_event *event,
+				 uint32_t *tv_sec,
+				 uint32_t *tv_usec,
+				 bool interruptible)
 {
 	struct vmw_event_fence_action *eaction;
-	struct ttm_mem_global *mem_glob =
-		vmw_mem_glob(fence->fman->dev_priv);
 	struct vmw_fence_manager *fman = fence->fman;
-	uint32_t size = fman->event_fence_action_size +
-		ttm_round_pot(event->length);
-	int ret;
-
-	/*
-	 * Account for internal structure size as well as the
-	 * event size itself.
-	 */
-
-	ret = ttm_mem_global_alloc(mem_glob, size, false, interruptible);
-	if (unlikely(ret != 0))
-		return ret;
+	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
+	unsigned long irq_flags;
 
 	eaction = kzalloc(sizeof(*eaction), GFP_KERNEL);
-	if (unlikely(eaction == NULL)) {
-		ttm_mem_global_free(mem_glob, size);
+	if (unlikely(eaction == NULL))
 		return -ENOMEM;
-	}
 
-	eaction->e.event = event;
-	eaction->e.file_priv = file_priv;
-	eaction->e.destroy = vmw_event_fence_action_delivered;
+	eaction->event = event;
 
 	eaction->action.seq_passed = vmw_event_fence_action_seq_passed;
 	eaction->action.cleanup = vmw_event_fence_action_cleanup;
@@ -986,16 +976,89 @@
 
 	eaction->fence = vmw_fence_obj_reference(fence);
 	eaction->dev = fman->dev_priv->dev;
-	eaction->size = size;
 	eaction->tv_sec = tv_sec;
 	eaction->tv_usec = tv_usec;
 
-	kref_init(&eaction->kref);
+	spin_lock_irqsave(&fman->lock, irq_flags);
+	list_add_tail(&eaction->fpriv_head, &vmw_fp->fence_events);
+	spin_unlock_irqrestore(&fman->lock, irq_flags);
+
 	vmw_fence_obj_add_action(fence, &eaction->action);
 
 	return 0;
 }
 
+struct vmw_event_fence_pending {
+	struct drm_pending_event base;
+	struct drm_vmw_event_fence event;
+};
+
+int vmw_event_fence_action_create(struct drm_file *file_priv,
+				  struct vmw_fence_obj *fence,
+				  uint32_t flags,
+				  uint64_t user_data,
+				  bool interruptible)
+{
+	struct vmw_event_fence_pending *event;
+	struct drm_device *dev = fence->fman->dev_priv->dev;
+	unsigned long irq_flags;
+	int ret;
+
+	spin_lock_irqsave(&dev->event_lock, irq_flags);
+
+	ret = (file_priv->event_space < sizeof(event->event)) ? -EBUSY : 0;
+	if (likely(ret == 0))
+		file_priv->event_space -= sizeof(event->event);
+
+	spin_unlock_irqrestore(&dev->event_lock, irq_flags);
+
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Failed to allocate event space for this file.\n");
+		goto out_no_space;
+	}
+
+	event = kzalloc(sizeof(*event), GFP_KERNEL);
+	if (unlikely(event == NULL)) {
+		DRM_ERROR("Failed to allocate an event.\n");
+		ret = -ENOMEM;
+		goto out_no_event;
+	}
+
+	event->event.base.type = DRM_VMW_EVENT_FENCE_SIGNALED;
+	event->event.base.length = sizeof(event->event);
+	event->event.user_data = user_data;
+
+	event->base.event = &event->event.base;
+	event->base.file_priv = file_priv;
+	event->base.destroy = (void (*) (struct drm_pending_event *)) kfree;
+
+	if (flags & DRM_VMW_FE_FLAG_REQ_TIME)
+		ret = vmw_event_fence_action_queue(file_priv, fence,
+						   &event->base,
+						   &event->event.tv_sec,
+						   &event->event.tv_usec,
+						   interruptible);
+	else
+		ret = vmw_event_fence_action_queue(file_priv, fence,
+						   &event->base,
+						   NULL,
+						   NULL,
+						   interruptible);
+	if (ret != 0)
+		goto out_no_queue;
+
+	return 0;
+
+out_no_queue:
+	event->base.destroy(&event->base);
+out_no_event:
+	spin_lock_irqsave(&dev->event_lock, irq_flags);
+	file_priv->event_space += sizeof(event->event);
+	spin_unlock_irqrestore(&dev->event_lock, irq_flags);
+out_no_space:
+	return ret;
+}
+
 int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
 			  struct drm_file *file_priv)
 {
@@ -1008,8 +1071,6 @@
 		(struct drm_vmw_fence_rep __user *)(unsigned long)
 		arg->fence_rep;
 	uint32_t handle;
-	unsigned long irq_flags;
-	struct drm_vmw_event_fence *event;
 	int ret;
 
 	/*
@@ -1062,59 +1123,22 @@
 
 	BUG_ON(fence == NULL);
 
-	spin_lock_irqsave(&dev->event_lock, irq_flags);
-
-	ret = (file_priv->event_space < sizeof(*event)) ? -EBUSY : 0;
-	if (likely(ret == 0))
-		file_priv->event_space -= sizeof(*event);
-
-	spin_unlock_irqrestore(&dev->event_lock, irq_flags);
-
-	if (unlikely(ret != 0)) {
-		DRM_ERROR("Failed to allocate event space for this file.\n");
-		goto out_no_event_space;
-	}
-
-	event = kzalloc(sizeof(*event), GFP_KERNEL);
-	if (unlikely(event == NULL)) {
-		DRM_ERROR("Failed to allocate an event.\n");
-		goto out_no_event;
-	}
-
-	event->base.type = DRM_VMW_EVENT_FENCE_SIGNALED;
-	event->base.length = sizeof(*event);
-	event->user_data = arg->user_data;
-
-	if (arg->flags & DRM_VMW_FE_FLAG_REQ_TIME)
-		ret = vmw_event_fence_action_create(file_priv, fence,
-						    &event->base,
-						    &event->tv_sec,
-						    &event->tv_usec,
-						    true);
-	else
-		ret = vmw_event_fence_action_create(file_priv, fence,
-						    &event->base,
-						    NULL,
-						    NULL,
-						    true);
+	ret = vmw_event_fence_action_create(file_priv, fence,
+					    arg->flags,
+					    arg->user_data,
+					    true);
 
 	if (unlikely(ret != 0)) {
 		if (ret != -ERESTARTSYS)
 			DRM_ERROR("Failed to attach event to fence.\n");
-		goto out_no_attach;
+		goto out_no_create;
 	}
 
 	vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence,
 				    handle);
 	vmw_fence_obj_unreference(&fence);
 	return 0;
-out_no_attach:
-	kfree(event);
-out_no_event:
-	spin_lock_irqsave(&dev->event_lock, irq_flags);
-	file_priv->event_space += sizeof(*event);
-	spin_unlock_irqrestore(&dev->event_lock, irq_flags);
-out_no_event_space:
+out_no_create:
 	if (user_fence_rep != NULL)
 		ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
 					  handle, TTM_REF_USAGE);
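
Taken together, the per-file fence_events list gives each pending event
a single owner at any point in time. A condensed view of the lifecycle
(an illustrative pseudo-sequence assembled from the functions above, not
one literal code path):

	/* open (vmw_driver_open): the list starts out empty. */
	INIT_LIST_HEAD(&vmw_fp->fence_events);

	/* queue (vmw_event_fence_action_queue): the action joins the
	 * file's list under fman->lock. */
	list_add_tail(&eaction->fpriv_head, &vmw_fp->fence_events);

	/* signal (vmw_event_fence_action_seq_passed): the action leaves
	 * the file's list and its event moves to the DRM event queue,
	 * where read() picks it up. */
	list_del_init(&eaction->fpriv_head);
	list_add_tail(&event->link, &file_priv->event_list);

	/* close-before-signal (vmw_preclose): any event still linked is
	 * destroyed, so nothing can dereference the dying drm_file. */
	vmw_event_fence_fpriv_gone(dev_priv->fman, &vmw_fp->fence_events);
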
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
index 0854a20..faf2e78 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
@@ -109,5 +109,12 @@
 				     struct drm_file *file_priv);
 extern int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
 				 struct drm_file *file_priv);
-
+extern void vmw_event_fence_fpriv_gone(struct vmw_fence_manager *fman,
+				       struct list_head *event_list);
+extern int vmw_event_fence_action_queue(struct drm_file *file_priv,
+					struct vmw_fence_obj *fence,
+					struct drm_pending_event *event,
+					uint32_t *tv_sec,
+					uint32_t *tv_usec,
+					bool interruptible);
 #endif /* _VMWGFX_FENCE_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index b66ef0e..2286d47 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -422,7 +422,8 @@
 				struct vmw_framebuffer *framebuffer,
 				unsigned flags, unsigned color,
 				struct drm_clip_rect *clips,
-				unsigned num_clips, int inc)
+				unsigned num_clips, int inc,
+				struct vmw_fence_obj **out_fence)
 {
 	struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
 	struct drm_clip_rect *clips_ptr;
@@ -542,12 +543,15 @@
 		if (num == 0)
 			continue;
 
+		/* only return the last fence */
+		if (out_fence && *out_fence)
+			vmw_fence_obj_unreference(out_fence);
 
 		/* recalculate package length */
 		fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num;
 		cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
 		ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
-					  fifo_size, 0, NULL);
+					  fifo_size, 0, NULL, out_fence);
 
 		if (unlikely(ret != 0))
 			break;
@@ -598,7 +602,7 @@
 
 	ret = do_surface_dirty_sou(dev_priv, file_priv, &vfbs->base,
 				   flags, color,
-				   clips, num_clips, inc);
+				   clips, num_clips, inc, NULL);
 
 	ttm_read_unlock(&vmaster->lock);
 	return 0;
@@ -809,7 +813,7 @@
 	cmd->body.ptr.offset = 0;
 
 	ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
-				  fifo_size, 0, NULL);
+				  fifo_size, 0, NULL, NULL);
 
 	kfree(cmd);
 
@@ -821,7 +825,8 @@
 			       struct vmw_framebuffer *framebuffer,
 			       unsigned flags, unsigned color,
 			       struct drm_clip_rect *clips,
-			       unsigned num_clips, int increment)
+			       unsigned num_clips, int increment,
+			       struct vmw_fence_obj **out_fence)
 {
 	struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
 	struct drm_clip_rect *clips_ptr;
@@ -894,9 +899,13 @@
 		if (hit_num == 0)
 			continue;
 
+		/* only return the last fence */
+		if (out_fence && *out_fence)
+			vmw_fence_obj_unreference(out_fence);
+
 		fifo_size = sizeof(*blits) * hit_num;
 		ret = vmw_execbuf_process(file_priv, dev_priv, NULL, blits,
-					  fifo_size, 0, NULL);
+					  fifo_size, 0, NULL, out_fence);
 
 		if (unlikely(ret != 0))
 			break;
@@ -942,7 +951,7 @@
 	} else {
 		ret = do_dmabuf_dirty_sou(file_priv, dev_priv, &vfbd->base,
 					  flags, color,
-					  clips, num_clips, increment);
+					  clips, num_clips, increment, NULL);
 	}
 
 	ttm_read_unlock(&vmaster->lock);
@@ -1296,7 +1305,7 @@
 		fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num;
 		cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
 		ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
-					  fifo_size, 0, NULL);
+					  fifo_size, 0, NULL, NULL);
 
 		if (unlikely(ret != 0))
 			break;
@@ -1409,7 +1418,7 @@
 	fifo_size = sizeof(*cmd) + sizeof(*blits) * blits_pos;
 
 	ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd, fifo_size,
-				  0, user_fence_rep);
+				  0, user_fence_rep, NULL);
 
 	kfree(cmd);
 
@@ -1672,6 +1681,74 @@
 	return 0;
 }
 
+int vmw_du_page_flip(struct drm_crtc *crtc,
+		     struct drm_framebuffer *fb,
+		     struct drm_pending_vblank_event *event)
+{
+	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
+	struct drm_framebuffer *old_fb = crtc->fb;
+	struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(fb);
+	struct drm_file *file_priv;
+	struct vmw_fence_obj *fence = NULL;
+	struct drm_clip_rect clips;
+	int ret;
+
+	/* require ScreenObject support for page flipping */
+	if (!dev_priv->sou_priv)
+		return -ENOSYS;
+
+	/* This implementation relies on the event to signal completion. */
+	if (unlikely(event == NULL))
+		return -EINVAL;
+
+	file_priv = event->base.file_priv;
+
+	if (!vmw_kms_screen_object_flippable(dev_priv, crtc))
+		return -EINVAL;
+
+	crtc->fb = fb;
+
+	/* do a full screen dirty update */
+	clips.x1 = clips.y1 = 0;
+	clips.x2 = fb->width;
+	clips.y2 = fb->height;
+
+	if (vfb->dmabuf)
+		ret = do_dmabuf_dirty_sou(file_priv, dev_priv, vfb,
+					  0, 0, &clips, 1, 1, &fence);
+	else
+		ret = do_surface_dirty_sou(dev_priv, file_priv, vfb,
+					   0, 0, &clips, 1, 1, &fence);
+
+	if (ret != 0)
+		goto out_no_fence;
+	if (!fence) {
+		ret = -EINVAL;
+		goto out_no_fence;
+	}
+
+	ret = vmw_event_fence_action_queue(file_priv, fence,
+					   &event->base,
+					   &event->event.tv_sec,
+					   &event->event.tv_usec,
+					   true);
+
+	/*
+	 * No need to hold on to this now. The only cleanup
+	 * we need to do if we fail is unref the fence.
+	 */
+	vmw_fence_obj_unreference(&fence);
+
+	if (vmw_crtc_to_du(crtc)->is_implicit)
+		vmw_kms_screen_object_update_implicit_fb(dev_priv, crtc);
+
+	return ret;
+
+out_no_fence:
+	crtc->fb = old_fb;
+	return ret;
+}
+
 void vmw_du_crtc_save(struct drm_crtc *crtc)
 {
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index a4f7f03..8184bc5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -121,6 +121,9 @@
  * Shared display unit functions - vmwgfx_kms.c
  */
 void vmw_display_unit_cleanup(struct vmw_display_unit *du);
+int vmw_du_page_flip(struct drm_crtc *crtc,
+		     struct drm_framebuffer *fb,
+		     struct drm_pending_vblank_event *event);
 void vmw_du_crtc_save(struct drm_crtc *crtc);
 void vmw_du_crtc_restore(struct drm_crtc *crtc);
 void vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
@@ -154,5 +157,10 @@
 int vmw_kms_close_screen_object_display(struct vmw_private *dev_priv);
 int vmw_kms_sou_update_layout(struct vmw_private *dev_priv, unsigned num,
 			      struct drm_vmw_rect *rects);
+bool vmw_kms_screen_object_flippable(struct vmw_private *dev_priv,
+				     struct drm_crtc *crtc);
+void vmw_kms_screen_object_update_implicit_fb(struct vmw_private *dev_priv,
+					      struct drm_crtc *crtc);
+
 
 #endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index f77b184..070fb23 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -354,8 +354,8 @@
 	INIT_LIST_HEAD(&ldu->active);
 
 	ldu->base.pref_active = (unit == 0);
-	ldu->base.pref_width = 800;
-	ldu->base.pref_height = 600;
+	ldu->base.pref_width = dev_priv->initial_width;
+	ldu->base.pref_height = dev_priv->initial_height;
 	ldu->base.pref_mode = NULL;
 	ldu->base.is_implicit = true;
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index 4defdcf..6deaf2f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -394,6 +394,7 @@
 	.gamma_set = vmw_du_crtc_gamma_set,
 	.destroy = vmw_sou_crtc_destroy,
 	.set_config = vmw_sou_crtc_set_config,
+	.page_flip = vmw_du_page_flip,
 };
 
 /*
@@ -448,8 +449,8 @@
 	sou->active_implicit = false;
 
 	sou->base.pref_active = (unit == 0);
-	sou->base.pref_width = 800;
-	sou->base.pref_height = 600;
+	sou->base.pref_width = dev_priv->initial_width;
+	sou->base.pref_height = dev_priv->initial_height;
 	sou->base.pref_mode = NULL;
 	sou->base.is_implicit = true;
 
@@ -535,3 +536,36 @@
 
 	return 0;
 }
+
+/**
+ * vmw_kms_screen_object_flippable - Returns true if this unit can be
+ * page flipped. Must be called with the mode_config mutex held.
+ */
+bool vmw_kms_screen_object_flippable(struct vmw_private *dev_priv,
+				     struct drm_crtc *crtc)
+{
+	struct vmw_screen_object_unit *sou = vmw_crtc_to_sou(crtc);
+
+	if (!sou->base.is_implicit)
+		return true;
+
+	if (dev_priv->sou_priv->num_implicit != 1)
+		return false;
+
+	return true;
+}
+
+/**
+ * vmw_kms_screen_object_update_implicit_fb - Update the implicit fb
+ * to the current fb of this crtc. Mode_config mutex must be held.
+ */
+void vmw_kms_screen_object_update_implicit_fb(struct vmw_private *dev_priv,
+					      struct drm_crtc *crtc)
+{
+	struct vmw_screen_object_unit *sou = vmw_crtc_to_sou(crtc);
+
+	BUG_ON(!sou->base.is_implicit);
+
+	dev_priv->sou_priv->implicit_fb =
+		vmw_framebuffer_to_vfb(sou->base.crtc.fb);
+}