/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <drm/drmP.h>
#include "gpu_scheduler.h"

#include "spsc_queue.h"

#define CREATE_TRACE_POINTS
#include "gpu_sched_trace.h"

#define to_amd_sched_job(sched_job)		\
		container_of((sched_job), struct amd_sched_job, queue_node)

static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity);
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
static void amd_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);

/* Initialize a given run queue struct */
static void amd_sched_rq_init(struct amd_sched_rq *rq)
{
	spin_lock_init(&rq->lock);
	INIT_LIST_HEAD(&rq->entities);
	rq->current_entity = NULL;
}

static void amd_sched_rq_add_entity(struct amd_sched_rq *rq,
				    struct amd_sched_entity *entity)
{
	if (!list_empty(&entity->list))
		return;
	spin_lock(&rq->lock);
	list_add_tail(&entity->list, &rq->entities);
	spin_unlock(&rq->lock);
}

static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
				       struct amd_sched_entity *entity)
{
	if (list_empty(&entity->list))
		return;
	spin_lock(&rq->lock);
	list_del_init(&entity->list);
	if (rq->current_entity == entity)
		rq->current_entity = NULL;
	spin_unlock(&rq->lock);
}

/**
 * Select an entity which could provide a job to run
 *
 * @rq		The run queue to check.
 *
 * Try to find a ready entity, returns NULL if none found.
 */
static struct amd_sched_entity *
amd_sched_rq_select_entity(struct amd_sched_rq *rq)
{
	struct amd_sched_entity *entity;

	spin_lock(&rq->lock);

	entity = rq->current_entity;
	if (entity) {
		list_for_each_entry_continue(entity, &rq->entities, list) {
			if (amd_sched_entity_is_ready(entity)) {
				rq->current_entity = entity;
				spin_unlock(&rq->lock);
				return entity;
			}
		}
	}

	list_for_each_entry(entity, &rq->entities, list) {

		if (amd_sched_entity_is_ready(entity)) {
			rq->current_entity = entity;
			spin_unlock(&rq->lock);
			return entity;
		}

		if (entity == rq->current_entity)
			break;
	}

	spin_unlock(&rq->lock);

	return NULL;
}

/**
 * Init a context entity used by the scheduler when submitting to a HW ring.
 *
 * @sched	The pointer to the scheduler
 * @entity	The pointer to a valid amd_sched_entity
 * @rq		The run queue this entity belongs to
 * @jobs	The max number of jobs in the job queue
 * @guilty	Atomic flag that is set once the entity is marked guilty
 *
 * Return 0 on success, negative error code on failure.
 */
int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
			  struct amd_sched_entity *entity,
			  struct amd_sched_rq *rq,
			  uint32_t jobs, atomic_t *guilty)
{
	if (!(sched && entity && rq))
		return -EINVAL;

	memset(entity, 0, sizeof(struct amd_sched_entity));
	INIT_LIST_HEAD(&entity->list);
	entity->rq = rq;
	entity->sched = sched;
	entity->guilty = guilty;

	spin_lock_init(&entity->rq_lock);
	spin_lock_init(&entity->queue_lock);
	spsc_queue_init(&entity->job_queue);

	atomic_set(&entity->fence_seq, 0);
	entity->fence_context = dma_fence_context_alloc(2);

	return 0;
}
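
/*
 * Illustrative sketch, not part of this file: a driver would typically
 * create one entity per context for each run queue it wants to submit
 * to.  The "ring" and "ctx" structures and the "num_jobs" value below
 * are hypothetical driver-side names:
 *
 *	struct amd_sched_rq *rq = &ring->sched.sched_rq[prio];
 *	int r;
 *
 *	r = amd_sched_entity_init(&ring->sched, &ctx->entity, rq,
 *				  num_jobs, NULL);
 *	if (r)
 *		return r;
 */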

/**
 * Query if entity is initialized
 *
 * @sched	Pointer to scheduler instance
 * @entity	The pointer to a valid scheduler entity
 *
 * Return true if entity is initialized, false otherwise
 */
static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched,
					    struct amd_sched_entity *entity)
{
	return entity->sched == sched &&
		entity->rq != NULL;
}

/**
 * Check if entity is idle
 *
 * @entity	The pointer to a valid scheduler entity
 *
 * Return true if the entity does not have any unscheduled jobs.
 */
static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
{
	rmb();
	if (spsc_queue_peek(&entity->job_queue) == NULL)
		return true;

	return false;
}

/**
 * Check if entity is ready
 *
 * @entity	The pointer to a valid scheduler entity
 *
 * Return true if entity could provide a job.
 */
static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity)
{
	if (spsc_queue_peek(&entity->job_queue) == NULL)
		return false;

	if (READ_ONCE(entity->dependency))
		return false;

	return true;
}

/**
 * Destroy a context entity
 *
 * @sched	Pointer to scheduler instance
 * @entity	The pointer to a valid scheduler entity
 *
 * Cleanup and free the allocated resources.
 */
void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
			   struct amd_sched_entity *entity)
{
	int r;

	if (!amd_sched_entity_is_initialized(sched, entity))
		return;
	/*
	 * The client will not queue more IBs during this fini: consume the
	 * existing queued IBs, or discard them on SIGKILL.
	 */
	if ((current->flags & PF_SIGNALED) && current->exit_code == SIGKILL)
		r = -ERESTARTSYS;
	else
		r = wait_event_killable(sched->job_scheduled,
					amd_sched_entity_is_idle(entity));
	amd_sched_entity_set_rq(entity, NULL);
	if (r) {
		struct amd_sched_job *job;

		/* Park the kernel thread for a moment to make sure it isn't
		 * processing our entity.
		 */
		kthread_park(sched->thread);
		kthread_unpark(sched->thread);
		while ((job = to_amd_sched_job(spsc_queue_pop(&entity->job_queue)))) {
			struct amd_sched_fence *s_fence = job->s_fence;

			amd_sched_fence_scheduled(s_fence);
			dma_fence_set_error(&s_fence->finished, -ESRCH);
			amd_sched_fence_finished(s_fence);
			dma_fence_put(&s_fence->finished);
			sched->ops->free_job(job);
		}
	}
}

static void amd_sched_entity_wakeup(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct amd_sched_entity *entity =
		container_of(cb, struct amd_sched_entity, cb);
	entity->dependency = NULL;
	dma_fence_put(f);
	amd_sched_wakeup(entity->sched);
}

static void amd_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct amd_sched_entity *entity =
		container_of(cb, struct amd_sched_entity, cb);
	entity->dependency = NULL;
	dma_fence_put(f);
}

void amd_sched_entity_set_rq(struct amd_sched_entity *entity,
			     struct amd_sched_rq *rq)
{
	if (entity->rq == rq)
		return;

	spin_lock(&entity->rq_lock);

	if (entity->rq)
		amd_sched_rq_remove_entity(entity->rq, entity);

	entity->rq = rq;
	if (rq)
		amd_sched_rq_add_entity(rq, entity);

	spin_unlock(&entity->rq_lock);
}
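
/*
 * Illustrative sketch, not part of this file: changing an entity's
 * priority amounts to moving it onto another run queue, where "prio" is
 * a hypothetical index between AMD_SCHED_PRIORITY_MIN and
 * AMD_SCHED_PRIORITY_MAX - 1:
 *
 *	amd_sched_entity_set_rq(entity, &sched->sched_rq[prio]);
 */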

bool amd_sched_dependency_optimized(struct dma_fence *fence,
				    struct amd_sched_entity *entity)
{
	struct amd_gpu_scheduler *sched = entity->sched;
	struct amd_sched_fence *s_fence;

	if (!fence || dma_fence_is_signaled(fence))
		return false;
	if (fence->context == entity->fence_context)
		return true;
	s_fence = to_amd_sched_fence(fence);
	if (s_fence && s_fence->sched == sched)
		return true;

	return false;
}

static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
{
	struct amd_gpu_scheduler *sched = entity->sched;
	struct dma_fence *fence = entity->dependency;
	struct amd_sched_fence *s_fence;

	if (fence->context == entity->fence_context) {
		/* We can ignore fences from ourselves */
		dma_fence_put(entity->dependency);
		return false;
	}

	s_fence = to_amd_sched_fence(fence);
	if (s_fence && s_fence->sched == sched) {

		/*
		 * Fence is from the same scheduler, only need to wait for
		 * it to be scheduled
		 */
		fence = dma_fence_get(&s_fence->scheduled);
		dma_fence_put(entity->dependency);
		entity->dependency = fence;
		if (!dma_fence_add_callback(fence, &entity->cb,
					    amd_sched_entity_clear_dep))
			return true;

		/* Ignore it when it is already scheduled */
		dma_fence_put(fence);
		return false;
	}

	if (!dma_fence_add_callback(entity->dependency, &entity->cb,
				    amd_sched_entity_wakeup))
		return true;

	dma_fence_put(entity->dependency);
	return false;
}

static struct amd_sched_job *
amd_sched_entity_pop_job(struct amd_sched_entity *entity)
{
	struct amd_gpu_scheduler *sched = entity->sched;
	struct amd_sched_job *sched_job = to_amd_sched_job(
				spsc_queue_peek(&entity->job_queue));

	if (!sched_job)
		return NULL;

	while ((entity->dependency = sched->ops->dependency(sched_job, entity)))
		if (amd_sched_entity_add_dependency_cb(entity))
			return NULL;

	/* skip jobs from an entity that has been marked guilty */
	if (entity->guilty && atomic_read(entity->guilty))
		dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED);

	spsc_queue_pop(&entity->job_queue);
	return sched_job;
}

/**
 * Submit a job to the job queue
 *
 * @sched_job		The pointer to the job to submit
 * @entity		The entity that owns the job queue
 *
 * Queues the job and, if it was the first job in an otherwise empty
 * queue, adds the entity to its run queue and wakes up the scheduler.
 */
void amd_sched_entity_push_job(struct amd_sched_job *sched_job,
			       struct amd_sched_entity *entity)
{
	struct amd_gpu_scheduler *sched = sched_job->sched;
	bool first = false;

	trace_amd_sched_job(sched_job, entity);

	spin_lock(&entity->queue_lock);
	first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);

	spin_unlock(&entity->queue_lock);

	/* first job wakes up scheduler */
	if (first) {
		/* Add the entity to the run queue */
		spin_lock(&entity->rq_lock);
		amd_sched_rq_add_entity(entity->rq, entity);
		spin_unlock(&entity->rq_lock);
		amd_sched_wakeup(sched);
	}
}
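
/*
 * Illustrative sketch, not part of this file: a typical driver-side
 * submission pairs amd_sched_job_init() (defined further below) with
 * amd_sched_entity_push_job().  The "ring", "ctx" and "job" structures
 * here are hypothetical driver wrappers; once the job is pushed the
 * scheduler owns it, so the caller keeps a reference to the finished
 * fence instead:
 *
 *	r = amd_sched_job_init(&job->base, &ring->sched, &ctx->entity, owner);
 *	if (r)
 *		return r;
 *	fence = dma_fence_get(&job->base.s_fence->finished);
 *	amd_sched_entity_push_job(&job->base, &ctx->entity);
 */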

/* job_finish is called after the hw fence is signaled */
static void amd_sched_job_finish(struct work_struct *work)
{
	struct amd_sched_job *s_job = container_of(work, struct amd_sched_job,
						   finish_work);
	struct amd_gpu_scheduler *sched = s_job->sched;

	/* remove job from ring_mirror_list */
	spin_lock(&sched->job_list_lock);
	list_del_init(&s_job->node);
	if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
		struct amd_sched_job *next;

		spin_unlock(&sched->job_list_lock);
		cancel_delayed_work_sync(&s_job->work_tdr);
		spin_lock(&sched->job_list_lock);

		/* queue TDR for next job */
		next = list_first_entry_or_null(&sched->ring_mirror_list,
						struct amd_sched_job, node);

		if (next)
			schedule_delayed_work(&next->work_tdr, sched->timeout);
	}
	spin_unlock(&sched->job_list_lock);
	dma_fence_put(&s_job->s_fence->finished);
	sched->ops->free_job(s_job);
}

static void amd_sched_job_finish_cb(struct dma_fence *f,
				    struct dma_fence_cb *cb)
{
	struct amd_sched_job *job = container_of(cb, struct amd_sched_job,
						 finish_cb);
	schedule_work(&job->finish_work);
}

static void amd_sched_job_begin(struct amd_sched_job *s_job)
{
	struct amd_gpu_scheduler *sched = s_job->sched;

	dma_fence_add_callback(&s_job->s_fence->finished, &s_job->finish_cb,
			       amd_sched_job_finish_cb);

	spin_lock(&sched->job_list_lock);
	list_add_tail(&s_job->node, &sched->ring_mirror_list);
	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
	    list_first_entry_or_null(&sched->ring_mirror_list,
				     struct amd_sched_job, node) == s_job)
		schedule_delayed_work(&s_job->work_tdr, sched->timeout);
	spin_unlock(&sched->job_list_lock);
}

static void amd_sched_job_timedout(struct work_struct *work)
{
	struct amd_sched_job *job = container_of(work, struct amd_sched_job,
						 work_tdr.work);

	job->sched->ops->timedout_job(job);
}

void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched, struct amd_sched_job *bad)
{
	struct amd_sched_job *s_job;
	struct amd_sched_entity *entity, *tmp;
	int i;

	spin_lock(&sched->job_list_lock);
	list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) {
		if (s_job->s_fence->parent &&
		    dma_fence_remove_callback(s_job->s_fence->parent,
					      &s_job->s_fence->cb)) {
			dma_fence_put(s_job->s_fence->parent);
			s_job->s_fence->parent = NULL;
			atomic_dec(&sched->hw_rq_count);
		}
	}
	spin_unlock(&sched->job_list_lock);

	if (bad) {
		/* Don't increase @bad's karma if it's from the KERNEL RQ,
		 * because sometimes a GPU hang leaves kernel jobs (like VM
		 * updating jobs) corrupted, but kernel jobs are always
		 * considered good.
		 */
		for (i = AMD_SCHED_PRIORITY_MIN; i < AMD_SCHED_PRIORITY_KERNEL; i++) {
			struct amd_sched_rq *rq = &sched->sched_rq[i];

			spin_lock(&rq->lock);
			list_for_each_entry_safe(entity, tmp, &rq->entities, list) {
				if (bad->s_fence->scheduled.context == entity->fence_context) {
					if (atomic_inc_return(&bad->karma) > bad->sched->hang_limit)
						if (entity->guilty)
							atomic_set(entity->guilty, 1);
					break;
				}
			}
			spin_unlock(&rq->lock);
			if (&entity->list != &rq->entities)
				break;
		}
	}
}

void amd_sched_job_kickout(struct amd_sched_job *s_job)
{
	struct amd_gpu_scheduler *sched = s_job->sched;

	spin_lock(&sched->job_list_lock);
	list_del_init(&s_job->node);
	spin_unlock(&sched->job_list_lock);
}

void amd_sched_job_recovery(struct amd_gpu_scheduler *sched)
{
	struct amd_sched_job *s_job, *tmp;
	bool found_guilty = false;
	int r;

	spin_lock(&sched->job_list_lock);
	s_job = list_first_entry_or_null(&sched->ring_mirror_list,
					 struct amd_sched_job, node);
	if (s_job && sched->timeout != MAX_SCHEDULE_TIMEOUT)
		schedule_delayed_work(&s_job->work_tdr, sched->timeout);

	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
		struct amd_sched_fence *s_fence = s_job->s_fence;
		struct dma_fence *fence;
		uint64_t guilty_context;

		if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
			found_guilty = true;
			guilty_context = s_job->s_fence->scheduled.context;
		}

		if (found_guilty && s_job->s_fence->scheduled.context == guilty_context)
			dma_fence_set_error(&s_fence->finished, -ECANCELED);

		spin_unlock(&sched->job_list_lock);
		fence = sched->ops->run_job(s_job);
		atomic_inc(&sched->hw_rq_count);
		if (fence) {
			s_fence->parent = dma_fence_get(fence);
			r = dma_fence_add_callback(fence, &s_fence->cb,
						   amd_sched_process_job);
			if (r == -ENOENT)
				amd_sched_process_job(fence, &s_fence->cb);
			else if (r)
				DRM_ERROR("fence add callback failed (%d)\n",
					  r);
			dma_fence_put(fence);
		} else {
			amd_sched_process_job(NULL, &s_fence->cb);
		}
		spin_lock(&sched->job_list_lock);
	}
	spin_unlock(&sched->job_list_lock);
}
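
/*
 * Illustrative sketch, not part of this file: on a hang, a driver's reset
 * path would typically park the scheduler thread, detach the hardware
 * fences, reset the hardware, and then replay the mirror list.  The
 * hardware-reset step in the middle is driver specific and only hinted
 * at here:
 *
 *	kthread_park(sched->thread);
 *	amd_sched_hw_job_reset(sched, bad_job);  (bad_job may be NULL)
 *	... reset the hardware / IP blocks ...
 *	amd_sched_job_recovery(sched);
 *	kthread_unpark(sched->thread);
 */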

/* init a sched_job with basic fields */
int amd_sched_job_init(struct amd_sched_job *job,
		       struct amd_gpu_scheduler *sched,
		       struct amd_sched_entity *entity,
		       void *owner)
{
	job->sched = sched;
	job->s_priority = entity->rq - sched->sched_rq;
	job->s_fence = amd_sched_fence_create(entity, owner);
	if (!job->s_fence)
		return -ENOMEM;
	job->id = atomic64_inc_return(&sched->job_id_count);

	INIT_WORK(&job->finish_work, amd_sched_job_finish);
	INIT_LIST_HEAD(&job->node);
	INIT_DELAYED_WORK(&job->work_tdr, amd_sched_job_timedout);

	return 0;
}

/**
 * Return true if we can push more jobs to the hw.
 */
static bool amd_sched_ready(struct amd_gpu_scheduler *sched)
{
	return atomic_read(&sched->hw_rq_count) <
		sched->hw_submission_limit;
}

/**
 * Wake up the scheduler when it is ready
 */
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched)
{
	if (amd_sched_ready(sched))
		wake_up_interruptible(&sched->wake_up_worker);
}

/**
 * Select next entity to process
 */
static struct amd_sched_entity *
amd_sched_select_entity(struct amd_gpu_scheduler *sched)
{
	struct amd_sched_entity *entity;
	int i;

	if (!amd_sched_ready(sched))
		return NULL;

	/* Kernel run queue has higher priority than normal run queue */
	for (i = AMD_SCHED_PRIORITY_MAX - 1; i >= AMD_SCHED_PRIORITY_MIN; i--) {
		entity = amd_sched_rq_select_entity(&sched->sched_rq[i]);
		if (entity)
			break;
	}

	return entity;
}

static void amd_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct amd_sched_fence *s_fence =
		container_of(cb, struct amd_sched_fence, cb);
	struct amd_gpu_scheduler *sched = s_fence->sched;

	dma_fence_get(&s_fence->finished);
	atomic_dec(&sched->hw_rq_count);
	amd_sched_fence_finished(s_fence);

	trace_amd_sched_process_job(s_fence);
	dma_fence_put(&s_fence->finished);
	wake_up_interruptible(&sched->wake_up_worker);
}

static bool amd_sched_blocked(struct amd_gpu_scheduler *sched)
{
	if (kthread_should_park()) {
		kthread_parkme();
		return true;
	}

	return false;
}

static int amd_sched_main(void *param)
{
	struct sched_param sparam = {.sched_priority = 1};
	struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;
	int r;

	sched_setscheduler(current, SCHED_FIFO, &sparam);

	while (!kthread_should_stop()) {
		struct amd_sched_entity *entity = NULL;
		struct amd_sched_fence *s_fence;
		struct amd_sched_job *sched_job;
		struct dma_fence *fence;

		wait_event_interruptible(sched->wake_up_worker,
					 (!amd_sched_blocked(sched) &&
					  (entity = amd_sched_select_entity(sched))) ||
					 kthread_should_stop());

		if (!entity)
			continue;

		sched_job = amd_sched_entity_pop_job(entity);
		if (!sched_job)
			continue;

		s_fence = sched_job->s_fence;

		atomic_inc(&sched->hw_rq_count);
		amd_sched_job_begin(sched_job);

		fence = sched->ops->run_job(sched_job);
		amd_sched_fence_scheduled(s_fence);

		if (fence) {
			s_fence->parent = dma_fence_get(fence);
			r = dma_fence_add_callback(fence, &s_fence->cb,
						   amd_sched_process_job);
			if (r == -ENOENT)
				amd_sched_process_job(fence, &s_fence->cb);
			else if (r)
				DRM_ERROR("fence add callback failed (%d)\n",
					  r);
			dma_fence_put(fence);
		} else {
			amd_sched_process_job(NULL, &s_fence->cb);
		}

		wake_up(&sched->job_scheduled);
	}
	return 0;
}

/**
 * Init a gpu scheduler instance
 *
 * @sched	The pointer to the scheduler
 * @ops		The backend operations for this scheduler.
 * @hw_submission	Number of hw submissions that can be in flight.
 * @hang_limit	Karma threshold beyond which a job is considered guilty.
 * @timeout	Timeout after which a pending job triggers a TDR.
 * @name	Name used for debugging
 *
 * Return 0 on success, otherwise error code.
 */
int amd_sched_init(struct amd_gpu_scheduler *sched,
		   const struct amd_sched_backend_ops *ops,
		   unsigned hw_submission,
		   unsigned hang_limit,
		   long timeout,
		   const char *name)
{
	int i;

	sched->ops = ops;
	sched->hw_submission_limit = hw_submission;
	sched->name = name;
	sched->timeout = timeout;
	sched->hang_limit = hang_limit;
	for (i = AMD_SCHED_PRIORITY_MIN; i < AMD_SCHED_PRIORITY_MAX; i++)
		amd_sched_rq_init(&sched->sched_rq[i]);

	init_waitqueue_head(&sched->wake_up_worker);
	init_waitqueue_head(&sched->job_scheduled);
	INIT_LIST_HEAD(&sched->ring_mirror_list);
	spin_lock_init(&sched->job_list_lock);
	atomic_set(&sched->hw_rq_count, 0);
	atomic64_set(&sched->job_id_count, 0);

	/* Each scheduler will run on a separate kernel thread */
	sched->thread = kthread_run(amd_sched_main, sched, sched->name);
	if (IS_ERR(sched->thread)) {
		DRM_ERROR("Failed to create scheduler for %s.\n", name);
		return PTR_ERR(sched->thread);
	}

	return 0;
}
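
/*
 * Illustrative sketch, not part of this file: a driver would create one
 * scheduler per hardware ring and wire its backend callbacks (dependency,
 * run_job, timedout_job, free_job) through amd_sched_backend_ops.  The
 * ops table, limits and ring structure below are hypothetical:
 *
 *	static const struct amd_sched_backend_ops my_sched_ops = { ... };
 *
 *	r = amd_sched_init(&ring->sched, &my_sched_ops,
 *			   hw_submission_limit, hang_limit,
 *			   msecs_to_jiffies(timeout_ms), ring->name);
 *	if (r)
 *		return r;
 */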

/**
 * Destroy a gpu scheduler
 *
 * @sched	The pointer to the scheduler
 */
void amd_sched_fini(struct amd_gpu_scheduler *sched)
{
	if (sched->thread)
		kthread_stop(sched->thread);
}