drm/msm: introduce msm_fence_context

Better encapsulate the per-timeline state in a fence-context.  For now
there is just a single fence-context (owned by the GPU), but eventually
we'll also have one per CRTC to enable fully explicit fencing.
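
As a rough sketch of how the new interface is meant to be used (names as
introduced by this patch; error handling trimmed):

	struct msm_fence_context *fctx;

	fctx = msm_fence_context_alloc(drm, name);  /* one timeline per GPU, for now */
	if (IS_ERR(fctx))
		return PTR_ERR(fctx);

	/* submit path: assign the next seqno on the timeline */
	submit->fence = ++fctx->last_fence;

	/* retire path: advance the timeline, wake waiters, run queued cbs */
	msm_update_fence(fctx, gpu->funcs->last_fence(gpu));

	/* waiters now block on the context rather than on the device */
	ret = msm_wait_fence(fctx, submit->fence, &timeout, true);

	msm_fence_context_free(fctx);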

Signed-off-by: Rob Clark <robdclark@gmail.com>
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 4951172..9aab871 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -121,7 +121,7 @@
 	gpu->rb->cur = gpu->rb->start;
 
 	/* reset completed fence seqno, just discard anything pending: */
-	adreno_gpu->memptrs->fence = gpu->submitted_fence;
+	adreno_gpu->memptrs->fence = gpu->fctx->last_fence;
 	adreno_gpu->memptrs->rptr  = 0;
 	adreno_gpu->memptrs->wptr  = 0;
 
@@ -254,7 +254,7 @@
 			adreno_gpu->rev.patchid);
 
 	seq_printf(m, "fence:    %d/%d\n", adreno_gpu->memptrs->fence,
-			gpu->submitted_fence);
+			gpu->fctx->last_fence);
 	seq_printf(m, "rptr:     %d\n", get_rptr(adreno_gpu));
 	seq_printf(m, "wptr:     %d\n", adreno_gpu->memptrs->wptr);
 	seq_printf(m, "rb wptr:  %d\n", get_wptr(gpu->rb));
@@ -295,7 +295,7 @@
 			adreno_gpu->rev.patchid);
 
 	printk("fence:    %d/%d\n", adreno_gpu->memptrs->fence,
-			gpu->submitted_fence);
+			gpu->fctx->last_fence);
 	printk("rptr:     %d\n", get_rptr(adreno_gpu));
 	printk("wptr:     %d\n", adreno_gpu->memptrs->wptr);
 	printk("rb wptr:  %d\n", get_wptr(gpu->rb));
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index fab0c2d..a2a3d9f 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -18,6 +18,7 @@
 #include "msm_drv.h"
 #include "msm_kms.h"
 #include "msm_gem.h"
+#include "msm_gpu.h"   /* temporary */
 #include "msm_fence.h"
 
 struct msm_commit {
@@ -202,6 +203,7 @@
 int msm_atomic_commit(struct drm_device *dev,
 		struct drm_atomic_state *state, bool nonblock)
 {
+	struct msm_drm_private *priv = dev->dev_private;
 	int nplanes = dev->mode_config.num_total_plane;
 	int ncrtcs = dev->mode_config.num_crtc;
 	ktime_t timeout;
@@ -276,15 +278,16 @@
 	 * current layout.
 	 */
 
-	if (nonblock) {
-		msm_queue_fence_cb(dev, &c->fence_cb, c->fence);
+	if (nonblock && priv->gpu) {
+		msm_queue_fence_cb(priv->gpu->fctx, &c->fence_cb, c->fence);
 		return 0;
 	}
 
 	timeout = ktime_add_ms(ktime_get(), 1000);
 
 	/* uninterruptible wait */
-	msm_wait_fence(dev, c->fence, &timeout, false);
+	if (priv->gpu)
+		msm_wait_fence(priv->gpu->fctx, c->fence, &timeout, false);
 
 	complete_commit(c);
 
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index d4a1a11..2b859f3 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -339,11 +339,9 @@
 	dev->dev_private = priv;
 
 	priv->wq = alloc_ordered_workqueue("msm", 0);
-	init_waitqueue_head(&priv->fence_event);
 	init_waitqueue_head(&priv->pending_crtcs_event);
 
 	INIT_LIST_HEAD(&priv->inactive_list);
-	INIT_LIST_HEAD(&priv->fence_cbs);
 	INIT_LIST_HEAD(&priv->vblank_ctrl.event_list);
 	INIT_WORK(&priv->vblank_ctrl.work, vblank_ctrl_worker);
 	spin_lock_init(&priv->vblank_ctrl.lock);
@@ -647,6 +645,7 @@
 static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
 		struct drm_file *file)
 {
+	struct msm_drm_private *priv = dev->dev_private;
 	struct drm_msm_wait_fence *args = data;
 	ktime_t timeout = to_ktime(args->timeout);
 
@@ -655,7 +654,10 @@
 		return -EINVAL;
 	}
 
-	return msm_wait_fence(dev, args->fence, &timeout, true);
+	if (!priv->gpu)
+		return 0;
+
+	return msm_wait_fence(priv->gpu->fctx, args->fence, &timeout, true);
 }
 
 static const struct drm_ioctl_desc msm_ioctls[] = {
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index af007ac..6c3f67b 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -49,6 +49,7 @@
 struct msm_rd_state;
 struct msm_perf_state;
 struct msm_gem_submit;
+struct msm_fence_context;
 struct msm_fence_cb;
 
 #define NUM_DOMAINS 2    /* one for KMS, then one per gpu core (?) */
@@ -101,9 +102,6 @@
 
 	struct drm_fb_helper *fbdev;
 
-	uint32_t next_fence, completed_fence;
-	wait_queue_head_t fence_event;
-
 	struct msm_rd_state *rd;
 	struct msm_perf_state *perf;
 
@@ -112,9 +110,6 @@
 
 	struct workqueue_struct *wq;
 
-	/* callbacks deferred until bo is inactive: */
-	struct list_head fence_cbs;
-
 	/* crtcs pending async atomic updates: */
 	uint32_t pending_crtcs;
 	wait_queue_head_t pending_crtcs_event;
@@ -194,8 +189,6 @@
 void msm_gem_prime_unpin(struct drm_gem_object *obj);
 void *msm_gem_vaddr_locked(struct drm_gem_object *obj);
 void *msm_gem_vaddr(struct drm_gem_object *obj);
-int msm_gem_queue_inactive_cb(struct drm_gem_object *obj,
-		struct msm_fence_cb *cb);
 void msm_gem_move_to_active(struct drm_gem_object *obj,
 		struct msm_gpu *gpu, bool write, uint32_t fence);
 void msm_gem_move_to_inactive(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/msm/msm_fence.c b/drivers/gpu/drm/msm/msm_fence.c
index 002eecb..f0ed6a6 100644
--- a/drivers/gpu/drm/msm/msm_fence.c
+++ b/drivers/gpu/drm/msm/msm_fence.c
@@ -15,49 +15,68 @@
  * this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/fence.h>
+
 #include "msm_drv.h"
 #include "msm_fence.h"
-#include "msm_gpu.h"
 
-static inline bool fence_completed(struct drm_device *dev, uint32_t fence)
+
+struct msm_fence_context *
+msm_fence_context_alloc(struct drm_device *dev, const char *name)
 {
-	struct msm_drm_private *priv = dev->dev_private;
-	return (int32_t)(priv->completed_fence - fence) >= 0;
+	struct msm_fence_context *fctx;
+
+	fctx = kzalloc(sizeof(*fctx), GFP_KERNEL);
+	if (!fctx)
+		return ERR_PTR(-ENOMEM);
+
+	fctx->dev = dev;
+	fctx->name = name;
+	init_waitqueue_head(&fctx->event);
+	INIT_LIST_HEAD(&fctx->fence_cbs);
+
+	return fctx;
 }
 
-int msm_wait_fence(struct drm_device *dev, uint32_t fence,
-		ktime_t *timeout , bool interruptible)
+void msm_fence_context_free(struct msm_fence_context *fctx)
 {
-	struct msm_drm_private *priv = dev->dev_private;
+	kfree(fctx);
+}
+
+static inline bool fence_completed(struct msm_fence_context *fctx, uint32_t fence)
+{
+	return (int32_t)(fctx->completed_fence - fence) >= 0;
+}
+
+int msm_wait_fence(struct msm_fence_context *fctx, uint32_t fence,
+		ktime_t *timeout, bool interruptible)
+{
 	int ret;
 
-	if (!priv->gpu)
-		return 0;
-
-	if (fence > priv->gpu->submitted_fence) {
-		DRM_ERROR("waiting on invalid fence: %u (of %u)\n",
-				fence, priv->gpu->submitted_fence);
+	if (fence > fctx->last_fence) {
+		DRM_ERROR("%s: waiting on invalid fence: %u (of %u)\n",
+				fctx->name, fence, fctx->last_fence);
 		return -EINVAL;
 	}
 
 	if (!timeout) {
 		/* no-wait: */
-		ret = fence_completed(dev, fence) ? 0 : -EBUSY;
+		ret = fence_completed(fctx, fence) ? 0 : -EBUSY;
 	} else {
 		unsigned long remaining_jiffies = timeout_to_jiffies(timeout);
 
 		if (interruptible)
-			ret = wait_event_interruptible_timeout(priv->fence_event,
-				fence_completed(dev, fence),
+			ret = wait_event_interruptible_timeout(fctx->event,
+				fence_completed(fctx, fence),
 				remaining_jiffies);
 		else
-			ret = wait_event_timeout(priv->fence_event,
-				fence_completed(dev, fence),
+			ret = wait_event_timeout(fctx->event,
+				fence_completed(fctx, fence),
 				remaining_jiffies);
 
 		if (ret == 0) {
 			DBG("timeout waiting for fence: %u (completed: %u)",
-					fence, priv->completed_fence);
+					fence, fctx->completed_fence);
 			ret = -ETIMEDOUT;
 		} else if (ret != -ERESTARTSYS) {
 			ret = 0;
@@ -67,50 +86,50 @@
 	return ret;
 }
 
-int msm_queue_fence_cb(struct drm_device *dev,
+int msm_queue_fence_cb(struct msm_fence_context *fctx,
 		struct msm_fence_cb *cb, uint32_t fence)
 {
-	struct msm_drm_private *priv = dev->dev_private;
+	struct msm_drm_private *priv = fctx->dev->dev_private;
 	int ret = 0;
 
-	mutex_lock(&dev->struct_mutex);
+	mutex_lock(&fctx->dev->struct_mutex);
 	if (!list_empty(&cb->work.entry)) {
 		ret = -EINVAL;
-	} else if (fence > priv->completed_fence) {
+	} else if (fence > fctx->completed_fence) {
 		cb->fence = fence;
-		list_add_tail(&cb->work.entry, &priv->fence_cbs);
+		list_add_tail(&cb->work.entry, &fctx->fence_cbs);
 	} else {
 		queue_work(priv->wq, &cb->work);
 	}
-	mutex_unlock(&dev->struct_mutex);
+	mutex_unlock(&fctx->dev->struct_mutex);
 
 	return ret;
 }
 
 /* called from workqueue */
-void msm_update_fence(struct drm_device *dev, uint32_t fence)
+void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence)
 {
-	struct msm_drm_private *priv = dev->dev_private;
+	struct msm_drm_private *priv = fctx->dev->dev_private;
 
-	mutex_lock(&dev->struct_mutex);
-	priv->completed_fence = max(fence, priv->completed_fence);
+	mutex_lock(&fctx->dev->struct_mutex);
+	fctx->completed_fence = max(fence, fctx->completed_fence);
 
-	while (!list_empty(&priv->fence_cbs)) {
+	while (!list_empty(&fctx->fence_cbs)) {
 		struct msm_fence_cb *cb;
 
-		cb = list_first_entry(&priv->fence_cbs,
+		cb = list_first_entry(&fctx->fence_cbs,
 				struct msm_fence_cb, work.entry);
 
-		if (cb->fence > priv->completed_fence)
+		if (cb->fence > fctx->completed_fence)
 			break;
 
 		list_del_init(&cb->work.entry);
 		queue_work(priv->wq, &cb->work);
 	}
 
-	mutex_unlock(&dev->struct_mutex);
+	mutex_unlock(&fctx->dev->struct_mutex);
 
-	wake_up_all(&priv->fence_event);
+	wake_up_all(&fctx->event);
 }
 
 void __msm_fence_worker(struct work_struct *work)
diff --git a/drivers/gpu/drm/msm/msm_fence.h b/drivers/gpu/drm/msm/msm_fence.h
index 6ddb81c..3ed2098 100644
--- a/drivers/gpu/drm/msm/msm_fence.h
+++ b/drivers/gpu/drm/msm/msm_fence.h
@@ -20,6 +20,21 @@
 
 #include "msm_drv.h"
 
+struct msm_fence_context {
+	struct drm_device *dev;
+	const char *name;
+	/* last_fence == completed_fence --> no pending work */
+	uint32_t last_fence;          /* last assigned fence */
+	uint32_t completed_fence;     /* last completed fence */
+	wait_queue_head_t event;
+	/* callbacks deferred until bo is inactive: */
+	struct list_head fence_cbs;
+};
+
+struct msm_fence_context *msm_fence_context_alloc(struct drm_device *dev,
+		const char *name);
+void msm_fence_context_free(struct msm_fence_context *fctx);
+
 /* callback from wq once fence has passed: */
 struct msm_fence_cb {
 	struct work_struct work;
@@ -34,10 +49,10 @@
 		(_cb)->func = _func;                         \
 	} while (0)
 
-int msm_wait_fence(struct drm_device *dev, uint32_t fence,
+int msm_wait_fence(struct msm_fence_context *fctx, uint32_t fence,
 		ktime_t *timeout, bool interruptible);
-int msm_queue_fence_cb(struct drm_device *dev,
+int msm_queue_fence_cb(struct msm_fence_context *fctx,
 		struct msm_fence_cb *cb, uint32_t fence);
-void msm_update_fence(struct drm_device *dev, uint32_t fence);
+void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence);
 
 #endif
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 09e2190..80aba76 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -411,18 +411,6 @@
 	return ret;
 }
 
-/* setup callback for when bo is no longer busy..
- * TODO probably want to differentiate read vs write..
- */
-int msm_gem_queue_inactive_cb(struct drm_gem_object *obj,
-		struct msm_fence_cb *cb)
-{
-	struct msm_gem_object *msm_obj = to_msm_bo(obj);
-	uint32_t fence = msm_gem_fence(msm_obj,
-			MSM_PREP_READ | MSM_PREP_WRITE);
-	return msm_queue_fence_cb(obj->dev, cb, fence);
-}
-
 void msm_gem_move_to_active(struct drm_gem_object *obj,
 		struct msm_gpu *gpu, bool write, uint32_t fence)
 {
@@ -454,6 +442,7 @@
 int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
 {
 	struct drm_device *dev = obj->dev;
+	struct msm_drm_private *priv = dev->dev_private;
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	int ret = 0;
 
@@ -463,7 +452,8 @@
 		if (op & MSM_PREP_NOSYNC)
 			timeout = NULL;
 
-		ret = msm_wait_fence(dev, fence, timeout, true);
+		if (priv->gpu)
+			ret = msm_wait_fence(priv->gpu->fctx, fence, timeout, true);
 	}
 
 	/* TODO cache maintenance */
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 80efe56..8f0b295 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -313,7 +313,7 @@
 	if (fence != gpu->hangcheck_fence) {
 		/* some progress has been made.. ya! */
 		gpu->hangcheck_fence = fence;
-	} else if (fence < gpu->submitted_fence) {
+	} else if (fence < gpu->fctx->last_fence) {
 		/* no progress and not done.. hung! */
 		gpu->hangcheck_fence = fence;
 		dev_err(dev->dev, "%s: hangcheck detected gpu lockup!\n",
@@ -321,12 +321,12 @@
 		dev_err(dev->dev, "%s:     completed fence: %u\n",
 				gpu->name, fence);
 		dev_err(dev->dev, "%s:     submitted fence: %u\n",
-				gpu->name, gpu->submitted_fence);
+				gpu->name, gpu->fctx->last_fence);
 		queue_work(priv->wq, &gpu->recover_work);
 	}
 
 	/* if still more pending work, reset the hangcheck timer: */
-	if (gpu->submitted_fence > gpu->hangcheck_fence)
+	if (gpu->fctx->last_fence > gpu->hangcheck_fence)
 		hangcheck_timer_reset(gpu);
 
 	/* workaround for missing irq: */
@@ -474,7 +474,7 @@
 	struct drm_device *dev = gpu->dev;
 	uint32_t fence = gpu->funcs->last_fence(gpu);
 
-	msm_update_fence(gpu->dev, fence);
+	msm_update_fence(gpu->fctx, fence);
 
 	mutex_lock(&dev->struct_mutex);
 	retire_submits(gpu, fence);
@@ -502,9 +502,7 @@
 
 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
-	submit->fence = ++priv->next_fence;
-
-	gpu->submitted_fence = submit->fence;
+	submit->fence = ++gpu->fctx->last_fence;
 
 	inactive_cancel(gpu);
 
@@ -512,8 +510,6 @@
 
 	msm_rd_dump_submit(submit);
 
-	gpu->submitted_fence = submit->fence;
-
 	update_sw_cntrs(gpu);
 
 	for (i = 0; i < submit->nr_bos; i++) {
@@ -574,6 +570,12 @@
 	gpu->funcs = funcs;
 	gpu->name = name;
 	gpu->inactive = true;
+	gpu->fctx = msm_fence_context_alloc(drm, name);
+	if (IS_ERR(gpu->fctx)) {
+		ret = PTR_ERR(gpu->fctx);
+		gpu->fctx = NULL;
+		goto fail;
+	}
 
 	INIT_LIST_HEAD(&gpu->active_list);
 	INIT_WORK(&gpu->retire_work, retire_worker);
@@ -694,4 +696,7 @@
 
 	if (gpu->mmu)
 		gpu->mmu->funcs->destroy(gpu->mmu);
+
+	if (gpu->fctx)
+		msm_fence_context_free(gpu->fctx);
 }
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index 2bbe85a..025e25b 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -22,6 +22,7 @@
 #include <linux/regulator/consumer.h>
 
 #include "msm_drv.h"
+#include "msm_fence.h"
 #include "msm_ringbuffer.h"
 
 struct msm_gem_submit;
@@ -77,13 +78,15 @@
 	const struct msm_gpu_perfcntr *perfcntrs;
 	uint32_t num_perfcntrs;
 
+	/* ringbuffer: */
 	struct msm_ringbuffer *rb;
 	uint32_t rb_iova;
 
 	/* list of GEM active objects: */
 	struct list_head active_list;
 
-	uint32_t submitted_fence;
+	/* fencing: */
+	struct msm_fence_context *fctx;
 
 	/* is gpu powered/active? */
 	int active_cnt;
@@ -125,7 +128,7 @@
 
 static inline bool msm_gpu_active(struct msm_gpu *gpu)
 {
-	return gpu->submitted_fence > gpu->funcs->last_fence(gpu);
+	return gpu->fctx->last_fence > gpu->funcs->last_fence(gpu);
 }
 
 /* Perf-Counters: