drm/i915: Split i915_gem_execbuffer into its own file.

A number of dragons have been seen lurking within the execbuffer code.
The first step, then, is to isolate them from the rest and begin to
scrutinise them in depth. Suggested by Daniel Vetter.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
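---

Two of the helpers made visible by this split deserve a short
illustration. First, the seqno ordering test (i915_seqno_passed(),
visible as context in the last hunk) relies on wraparound-safe
unsigned arithmetic. A minimal standalone sketch of the idiom,
renamed seqno_passed() and built as plain userspace C purely for
demonstration:

	#include <stdint.h>
	#include <stdio.h>

	/* Same idiom as i915_seqno_passed(): the unsigned subtraction
	 * wraps modulo 2^32, so the cast to int32_t gives the right
	 * answer whenever the two seqnos are within 2^31 of each other.
	 */
	static int seqno_passed(uint32_t seq1, uint32_t seq2)
	{
		return (int32_t)(seq1 - seq2) >= 0;
	}

	int main(void)
	{
		printf("%d\n", seqno_passed(10, 5));         /* 1: plain ordering */
		printf("%d\n", seqno_passed(3, 0xfffffffe)); /* 1: later, across the wrap */
		printf("%d\n", seqno_passed(0xfffffffe, 3)); /* 0: not yet passed */
		return 0;
	}

Second, i915_mutex_lock_interruptible() is declared __must_check
because it can fail (e.g. when interrupted by a signal); a caller in
the new i915_gem_execbuffer.c would follow the usual pattern, sketched
here with a made-up function name:

	static int example_locked_op(struct drm_device *dev)
	{
		int ret;

		ret = i915_mutex_lock_interruptible(dev);
		if (ret)
			return ret;

		/* ... operate on GEM state under dev->struct_mutex ... */

		mutex_unlock(&dev->struct_mutex);
		return 0;
	}
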
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 4ad34f9..6c10b64 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1083,6 +1083,10 @@
 				struct drm_file *file_priv);
 void i915_gem_load(struct drm_device *dev);
 int i915_gem_init_object(struct drm_gem_object *obj);
+void i915_gem_flush_ring(struct drm_device *dev,
+			 struct intel_ring_buffer *ring,
+			 uint32_t invalidate_domains,
+			 uint32_t flush_domains);
 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
 						  size_t size);
 void i915_gem_free_object(struct drm_gem_object *obj);
@@ -1094,6 +1098,12 @@
 void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
 void i915_gem_lastclose(struct drm_device *dev);
 
+int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
+int __must_check i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
+						bool interruptible);
+void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
+				    struct intel_ring_buffer *ring);
+
 /**
  * Returns true if seq1 is later than seq2.
  */
@@ -1103,6 +1113,18 @@
 	return (int32_t)(seq1 - seq2) >= 0;
 }
 
+/*
+ * Peek at the seqno that the next request emitted on this ring will
+ * use, noting it as the ring's outstanding lazy request so that the
+ * request itself can be emitted later.
+ */
+static inline u32
+i915_gem_next_request_seqno(struct drm_device *dev,
+			    struct intel_ring_buffer *ring)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	return ring->outstanding_lazy_request = dev_priv->next_seqno;
+}
+
 int __must_check i915_gem_object_get_fence_reg(struct drm_i915_gem_object *obj,
 					       bool interruptible);
 int __must_check i915_gem_object_put_fence_reg(struct drm_i915_gem_object *obj,