drm/amd: add scheduler fence implementation (v2)

The scheduler fence is built on the kernel fence framework. Each job pushed to an entity now carries a struct amd_sched_fence which is signaled once the scheduler has processed the job.

v2: squash in Christian's build fix

Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
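
For reference, a minimal caller-side sketch of how the new interface might be
consumed (hypothetical driver code: "sched", "c_entity" and "my_job_data" are
placeholders; only amd_sched_push_job() and the generic fence calls come from
this patch and the fence framework):

	/* sched, c_entity and my_job_data are driver-supplied placeholders. */
	struct amd_sched_fence *s_fence;
	int r;

	/* Queue the job; on success, *s_fence is the per-job scheduler fence. */
	r = amd_sched_push_job(sched, c_entity, my_job_data, &s_fence);
	if (r)
		return r;

	/* s_fence->base is a plain struct fence, so the generic fence API
	 * applies; wait interruptibly until the scheduler signals completion.
	 * (Reference handling is elided; the caller must keep the fence alive
	 * across the wait.)
	 */
	r = fence_wait(&s_fence->base, true);
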
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 33b4f55..402086d 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -180,6 +180,7 @@
 			  uint32_t jobs)
 {
 	uint64_t seq_ring = 0;
+	char name[20];
 
 	if (!(sched && entity && rq))
 		return -EINVAL;
@@ -191,6 +192,10 @@
 	entity->scheduler = sched;
 	init_waitqueue_head(&entity->wait_queue);
 	init_waitqueue_head(&entity->wait_emit);
+	entity->fence_context = fence_context_alloc(1);
+	snprintf(name, sizeof(name), "c_entity[%llu]", entity->fence_context);
+	memcpy(entity->name, name, sizeof(entity->name));
+	INIT_LIST_HEAD(&entity->fence_list);
 	if(kfifo_alloc(&entity->job_queue,
 		       jobs * sizeof(void *),
 		       GFP_KERNEL))
@@ -199,6 +204,7 @@
 	spin_lock_init(&entity->queue_lock);
 	atomic64_set(&entity->last_emitted_v_seq, seq_ring);
 	atomic64_set(&entity->last_queued_v_seq, seq_ring);
+	atomic64_set(&entity->last_signaled_v_seq, seq_ring);
 
 	/* Add the entity to the run queue */
 	mutex_lock(&rq->lock);
@@ -291,15 +297,25 @@
 */
 int amd_sched_push_job(struct amd_gpu_scheduler *sched,
 		       struct amd_sched_entity *c_entity,
-		       void *data)
+		       void *data,
+		       struct amd_sched_fence **fence)
 {
-	struct amd_sched_job *job = kzalloc(sizeof(struct amd_sched_job),
-					    GFP_KERNEL);
+	struct amd_sched_job *job;
+
+	if (!fence)
+		return -EINVAL;
+	job = kzalloc(sizeof(*job), GFP_KERNEL);
 	if (!job)
 		return -ENOMEM;
 	job->sched = sched;
 	job->s_entity = c_entity;
 	job->data = data;
+	*fence = amd_sched_fence_create(c_entity);
+	if (!*fence) {
+		kfree(job);
+		return -ENOMEM;
+	}
+	job->s_fence = *fence;
 	while (kfifo_in_spinlocked(&c_entity->job_queue, &job, sizeof(void *),
 				   &c_entity->queue_lock) != sizeof(void *)) {
 		/**
@@ -368,12 +384,16 @@
 	unsigned long flags;
 
 	sched = sched_job->sched;
+	atomic64_set(&sched_job->s_entity->last_signaled_v_seq,
+		     sched_job->s_fence->v_seq);
+	amd_sched_fence_signal(sched_job->s_fence);
 	spin_lock_irqsave(&sched->queue_lock, flags);
 	list_del(&sched_job->list);
 	atomic64_dec(&sched->hw_rq_count);
 	spin_unlock_irqrestore(&sched->queue_lock, flags);
 
 	sched->ops->process_job(sched, sched_job);
+	fence_put(&sched_job->s_fence->base);
 	kfree(sched_job);
 	wake_up_interruptible(&sched->wait_queue);
 }
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
index f54615d..300132f 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -45,6 +45,7 @@
 	/* the virtual_seq is unique per context per ring */
 	atomic64_t			last_queued_v_seq;
 	atomic64_t			last_emitted_v_seq;
+	atomic64_t			last_signaled_v_seq;
 	/* the job_queue maintains the jobs submitted by clients */
 	struct kfifo                    job_queue;
 	spinlock_t			queue_lock;
@@ -52,6 +53,9 @@
 	wait_queue_head_t		wait_queue;
 	wait_queue_head_t		wait_emit;
 	bool                            is_pending;
+	uint64_t                        fence_context;
+	struct list_head		fence_list;
+	char                            name[20];
 };
 
 /**
@@ -72,14 +76,35 @@
 	int (*check_entity_status)(struct amd_sched_entity *entity);
 };
 
+struct amd_sched_fence {
+	struct fence                    base;
+	struct fence_cb                 cb;
+	struct list_head		list;
+	struct amd_sched_entity	        *entity;
+	uint64_t			v_seq;
+	spinlock_t			lock;
+};
+
 struct amd_sched_job {
 	struct list_head		list;
 	struct fence_cb                 cb;
 	struct amd_gpu_scheduler        *sched;
 	struct amd_sched_entity         *s_entity;
 	void                            *data;
+	struct amd_sched_fence          *s_fence;
 };
 
+extern const struct fence_ops amd_sched_fence_ops;
+static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f)
+{
+	struct amd_sched_fence *__f = container_of(f, struct amd_sched_fence, base);
+
+	if (__f->base.ops == &amd_sched_fence_ops)
+		return __f;
+
+	return NULL;
+}
+
 /**
  * Define the backend operations called by the scheduler,
  * these functions should be implemented in driver side
@@ -126,7 +151,8 @@
 
 int amd_sched_push_job(struct amd_gpu_scheduler *sched,
 		       struct amd_sched_entity *c_entity,
-		       void *data);
+		       void *data,
+		       struct amd_sched_fence **fence);
 
 int amd_sched_wait_emit(struct amd_sched_entity *c_entity,
 			uint64_t seq,
@@ -146,4 +172,9 @@
 
 uint64_t amd_sched_next_queued_seq(struct amd_sched_entity *c_entity);
 
+struct amd_sched_fence *amd_sched_fence_create(
+	struct amd_sched_entity *s_entity);
+void amd_sched_fence_signal(struct amd_sched_fence *fence);
+
+
 #endif
diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c
new file mode 100644
index 0000000..d580a35
--- /dev/null
+++ b/drivers/gpu/drm/amd/scheduler/sched_fence.c
@@ -0,0 +1,112 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ */
+#include <linux/kthread.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <drm/drmP.h>
+#include "gpu_scheduler.h"
+
+static void amd_sched_fence_wait_cb(struct fence *f, struct fence_cb *cb)
+{
+	struct amd_sched_fence *fence =
+		container_of(cb, struct amd_sched_fence, cb);
+	list_del_init(&fence->list);
+	fence_put(&fence->base);
+}
+
+struct amd_sched_fence *amd_sched_fence_create(
+	struct amd_sched_entity *s_entity)
+{
+	struct amd_sched_fence *fence;
+	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+	if (!fence)
+		return NULL;
+	fence->v_seq = atomic64_inc_return(&s_entity->last_queued_v_seq);
+	fence->entity = s_entity;
+	spin_lock_init(&fence->lock);
+	fence_init(&fence->base, &amd_sched_fence_ops,
+		   &fence->lock,
+		   s_entity->fence_context,
+		   fence->v_seq);
+	fence_get(&fence->base);
+	list_add_tail(&fence->list, &s_entity->fence_list);
+	if (fence_add_callback(&fence->base, &fence->cb,
+			       amd_sched_fence_wait_cb)) {
+		fence_put(&fence->base);
+		kfree(fence);
+		return NULL;
+	}
+	return fence;
+}
+
+static bool amd_sched_check_ts(struct amd_sched_entity *s_entity, uint64_t v_seq)
+{
+	return atomic64_read(&s_entity->last_signaled_v_seq) >= v_seq;
+}
+
+void amd_sched_fence_signal(struct amd_sched_fence *fence)
+{
+	if (amd_sched_check_ts(fence->entity, fence->v_seq)) {
+		int ret = fence_signal(&fence->base);
+		if (!ret)
+			FENCE_TRACE(&fence->base, "signaled from irq context\n");
+		else
+			FENCE_TRACE(&fence->base, "was already signaled\n");
+	} else
+		WARN(true, "fence does not match the processed job!\n");
+}
+
+static const char *amd_sched_fence_get_driver_name(struct fence *fence)
+{
+	return "amd_sched";
+}
+
+static const char *amd_sched_fence_get_timeline_name(struct fence *f)
+{
+	struct amd_sched_fence *fence = to_amd_sched_fence(f);
+	return fence->entity->name;
+}
+
+static bool amd_sched_fence_enable_signaling(struct fence *f)
+{
+	struct amd_sched_fence *fence = to_amd_sched_fence(f);
+
+	return !amd_sched_check_ts(fence->entity, fence->v_seq);
+}
+
+static bool amd_sched_fence_is_signaled(struct fence *f)
+{
+	struct amd_sched_fence *fence = to_amd_sched_fence(f);
+
+	return amd_sched_check_ts(fence->entity, fence->v_seq);
+}
+
+const struct fence_ops amd_sched_fence_ops = {
+	.get_driver_name = amd_sched_fence_get_driver_name,
+	.get_timeline_name = amd_sched_fence_get_timeline_name,
+	.enable_signaling = amd_sched_fence_enable_signaling,
+	.signaled = amd_sched_fence_is_signaled,
+	.wait = fence_default_wait,
+	.release = NULL,
+};
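
As a usage note, to_amd_sched_fence() compares f->ops against
amd_sched_fence_ops and returns NULL for foreign fences, so any struct fence
can safely be probed. A hypothetical helper (not part of this patch) might
look like:

	/* Return the submitting entity behind an arbitrary fence, or NULL
	 * if the fence was not created by this scheduler.
	 */
	static struct amd_sched_entity *amd_sched_fence_to_entity(struct fence *f)
	{
		struct amd_sched_fence *s_fence = to_amd_sched_fence(f);

		return s_fence ? s_fence->entity : NULL;
	}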