drm/amd: add kmem cache for sched fence

Scheduler fences are small objects that get allocated and freed for
every job, so give them their own slab cache instead of going through
the generic kmalloc pools. The cache is shared by all scheduler
instances: it is created when the first scheduler is initialized and
destroyed again when the last one is torn down, with a global atomic
refcount tracking the number of users.

Signed-off-by: Chunming Zhou <David1.Zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 89619a5..fe5b3c4 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -34,6 +34,10 @@
 amd_sched_entity_pop_job(struct amd_sched_entity *entity);
 static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
 
+/* slab cache for scheduler fences, shared by all scheduler instances */
+struct kmem_cache *sched_fence_slab;
+atomic_t sched_fence_slab_ref = ATOMIC_INIT(0);
+
 /* Initialize a given run queue struct */
 static void amd_sched_rq_init(struct amd_sched_rq *rq)
 {
@@ -450,6 +454,16 @@
 	init_waitqueue_head(&sched->wake_up_worker);
 	init_waitqueue_head(&sched->job_scheduled);
 	atomic_set(&sched->hw_rq_count, 0);
+	if (atomic_inc_return(&sched_fence_slab_ref) == 1) {
+		sched_fence_slab = kmem_cache_create(
+			"amd_sched_fence", sizeof(struct amd_sched_fence), 0,
+			SLAB_HWCACHE_ALIGN, NULL);
+		if (!sched_fence_slab) {
+			/* drop our reference again so a later init can retry */
+			atomic_dec(&sched_fence_slab_ref);
+			return -ENOMEM;
+		}
+	}
 
 	/* Each scheduler will run on a separate kernel thread */
 	sched->thread = kthread_run(amd_sched_main, sched, sched->name);
@@ -470,4 +484,6 @@
 {
 	if (sched->thread)
 		kthread_stop(sched->thread);
+	if (atomic_dec_and_test(&sched_fence_slab_ref))
+		kmem_cache_destroy(sched_fence_slab);
 }
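
Note that the hunks above only manage the cache's lifetime; the
allocation side lives in sched_fence.c, which is not part of this
excerpt. A minimal sketch of what the consumer side would look like,
assuming the fence create/release helpers of this scheduler version
(the globals would also need extern declarations in gpu_scheduler.h,
and the names below are illustrative, not the exact hunks):

struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *s_entity,
					       void *owner)
{
	struct amd_sched_fence *fence;

	/* allocate from the shared slab instead of kzalloc() */
	fence = kmem_cache_zalloc(sched_fence_slab, GFP_KERNEL);
	if (fence == NULL)
		return NULL;

	/* ... fence_init() and owner/entity setup as before ... */
	return fence;
}

static void amd_sched_fence_release(struct fence *f)
{
	struct amd_sched_fence *fence = to_amd_sched_fence(f);

	/* give the object back to the cache instead of kfree() */
	kmem_cache_free(sched_fence_slab, fence);
}

kmem_cache_zalloc() keeps the zeroing behaviour of the kzalloc() call
it replaces, while the dedicated, cache-line aligned slab
(SLAB_HWCACHE_ALIGN) suits objects that are recycled this frequently.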