/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <drm/drmP.h>
#include <drm/gpu_scheduler.h>
#include <drm/spsc_queue.h>

#define CREATE_TRACE_POINTS
#include <drm/gpu_scheduler_trace.h>

#define to_drm_sched_job(sched_job)		\
		container_of((sched_job), struct drm_sched_job, queue_node)

static bool drm_sched_entity_is_ready(struct drm_sched_entity *entity);
static void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);
/* Initialize a given run queue struct */
static void drm_sched_rq_init(struct drm_sched_rq *rq)
{
	spin_lock_init(&rq->lock);
	INIT_LIST_HEAD(&rq->entities);
	rq->current_entity = NULL;
}

static void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
				    struct drm_sched_entity *entity)
{
	if (!list_empty(&entity->list))
		return;
	spin_lock(&rq->lock);
	list_add_tail(&entity->list, &rq->entities);
	spin_unlock(&rq->lock);
}

static void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
				       struct drm_sched_entity *entity)
{
	if (list_empty(&entity->list))
		return;
	spin_lock(&rq->lock);
	list_del_init(&entity->list);
	if (rq->current_entity == entity)
		rq->current_entity = NULL;
	spin_unlock(&rq->lock);
}

/**
 * Select an entity which could provide a job to run
 *
 * @rq The run queue to check.
 *
 * Try to find a ready entity in round-robin fashion, continuing after the
 * entity picked last time. Returns NULL if none is found.
 */
static struct drm_sched_entity *
drm_sched_rq_select_entity(struct drm_sched_rq *rq)
{
	struct drm_sched_entity *entity;

	spin_lock(&rq->lock);

	entity = rq->current_entity;
	if (entity) {
		list_for_each_entry_continue(entity, &rq->entities, list) {
			if (drm_sched_entity_is_ready(entity)) {
				rq->current_entity = entity;
				spin_unlock(&rq->lock);
				return entity;
			}
		}
	}

	list_for_each_entry(entity, &rq->entities, list) {
		if (drm_sched_entity_is_ready(entity)) {
			rq->current_entity = entity;
			spin_unlock(&rq->lock);
			return entity;
		}

		if (entity == rq->current_entity)
			break;
	}

	spin_unlock(&rq->lock);

	return NULL;
}

/**
 * Init a context entity used by the scheduler when submitting to a HW ring.
 *
 * @sched The pointer to the scheduler
 * @entity The pointer to a valid drm_sched_entity
 * @rq The run queue this entity belongs to
 * @jobs The max number of jobs in the job queue
 * @guilty Shared atomic that is set when this entity caused a GPU hang;
 *         may be NULL
 *
 * Return 0 on success, a negative error code on failure.
 */
int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
			  struct drm_sched_entity *entity,
			  struct drm_sched_rq *rq,
			  uint32_t jobs, atomic_t *guilty)
{
	if (!(sched && entity && rq))
		return -EINVAL;

	memset(entity, 0, sizeof(struct drm_sched_entity));
	INIT_LIST_HEAD(&entity->list);
	entity->rq = rq;
	entity->sched = sched;
	entity->guilty = guilty;

	spin_lock_init(&entity->rq_lock);
	spin_lock_init(&entity->queue_lock);
	spsc_queue_init(&entity->job_queue);

	atomic_set(&entity->fence_seq, 0);
	entity->fence_context = dma_fence_context_alloc(2);

	return 0;
}
EXPORT_SYMBOL(drm_sched_entity_init);
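
/*
 * Example usage (a hedged sketch, not from this file): a hypothetical
 * driver "foo" creating one scheduler entity per userspace context on the
 * NORMAL run queue. All foo_* names are assumptions for illustration; only
 * the drm_sched_* and DRM_SCHED_* identifiers are the API defined here.
 *
 *	static int foo_ctx_init(struct foo_device *fdev, struct foo_ctx *ctx)
 *	{
 *		struct drm_gpu_scheduler *sched = &fdev->sched;
 *		struct drm_sched_rq *rq =
 *			&sched->sched_rq[DRM_SCHED_PRIORITY_NORMAL];
 *
 *		return drm_sched_entity_init(sched, &ctx->entity, rq, 32, NULL);
 *	}
 *
 * Passing NULL for @guilty opts the context out of guilty tracking.
 */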

/**
 * Query if entity is initialized
 *
 * @sched Pointer to scheduler instance
 * @entity The pointer to a valid scheduler entity
 *
 * Return true if entity is initialized, false otherwise.
 */
static bool drm_sched_entity_is_initialized(struct drm_gpu_scheduler *sched,
					    struct drm_sched_entity *entity)
{
	return entity->sched == sched &&
		entity->rq != NULL;
}

/**
 * Check if entity is idle
 *
 * @entity The pointer to a valid scheduler entity
 *
 * Return true if the entity has no unscheduled jobs left.
 */
static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
{
	rmb();
	if (spsc_queue_peek(&entity->job_queue) == NULL)
		return true;

	return false;
}

/**
 * Check if entity is ready
 *
 * @entity The pointer to a valid scheduler entity
 *
 * Return true if the entity could provide a job: its queue is non-empty
 * and it is not blocked on a dependency.
 */
static bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)
{
	if (spsc_queue_peek(&entity->job_queue) == NULL)
		return false;

	if (READ_ONCE(entity->dependency))
		return false;

	return true;
}

/**
 * Destroy a context entity
 *
 * @sched Pointer to scheduler instance
 * @entity The pointer to a valid scheduler entity
 *
 * Cleanup and free the allocated resources.
 */
void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
			   struct drm_sched_entity *entity)
{
	int r;

	if (!drm_sched_entity_is_initialized(sched, entity))
		return;
	/*
	 * The client will not queue more IBs during this fini; consume the
	 * existing queued IBs, or discard them on SIGKILL.
	 */
	if ((current->flags & PF_SIGNALED) && current->exit_code == SIGKILL)
		r = -ERESTARTSYS;
	else
		r = wait_event_killable(sched->job_scheduled,
					drm_sched_entity_is_idle(entity));
	drm_sched_entity_set_rq(entity, NULL);
	if (r) {
		struct drm_sched_job *job;

		/* Park the kernel thread for a moment to make sure it isn't
		 * processing our entity.
		 */
		kthread_park(sched->thread);
		kthread_unpark(sched->thread);
		if (entity->dependency) {
			dma_fence_remove_callback(entity->dependency,
						  &entity->cb);
			dma_fence_put(entity->dependency);
			entity->dependency = NULL;
		}

		while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) {
			struct drm_sched_fence *s_fence = job->s_fence;

			drm_sched_fence_scheduled(s_fence);
			dma_fence_set_error(&s_fence->finished, -ESRCH);
			drm_sched_fence_finished(s_fence);
			WARN_ON(s_fence->parent);
			dma_fence_put(&s_fence->finished);
			sched->ops->free_job(job);
		}
	}
}
EXPORT_SYMBOL(drm_sched_entity_fini);

static void drm_sched_entity_wakeup(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct drm_sched_entity *entity =
		container_of(cb, struct drm_sched_entity, cb);

	entity->dependency = NULL;
	dma_fence_put(f);
	drm_sched_wakeup(entity->sched);
}

static void drm_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct drm_sched_entity *entity =
		container_of(cb, struct drm_sched_entity, cb);

	entity->dependency = NULL;
	dma_fence_put(f);
}

void drm_sched_entity_set_rq(struct drm_sched_entity *entity,
			     struct drm_sched_rq *rq)
{
	if (entity->rq == rq)
		return;

	spin_lock(&entity->rq_lock);

	if (entity->rq)
		drm_sched_rq_remove_entity(entity->rq, entity);

	entity->rq = rq;
	if (rq)
		drm_sched_rq_add_entity(rq, entity);

	spin_unlock(&entity->rq_lock);
}
EXPORT_SYMBOL(drm_sched_entity_set_rq);
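
/*
 * Example usage (hedged sketch): drm_sched_entity_set_rq() lets a driver
 * re-prioritize an entity by moving it between the scheduler's run queues,
 * e.g. from a priority-override ioctl. The foo_* names are assumptions.
 *
 *	static void foo_ctx_set_priority(struct foo_ctx *ctx,
 *					 enum drm_sched_priority prio)
 *	{
 *		struct drm_gpu_scheduler *sched = ctx->entity.sched;
 *
 *		drm_sched_entity_set_rq(&ctx->entity, &sched->sched_rq[prio]);
 *	}
 */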

bool drm_sched_dependency_optimized(struct dma_fence *fence,
				    struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = entity->sched;
	struct drm_sched_fence *s_fence;

	if (!fence || dma_fence_is_signaled(fence))
		return false;
	if (fence->context == entity->fence_context)
		return true;
	s_fence = to_drm_sched_fence(fence);
	if (s_fence && s_fence->sched == sched)
		return true;

	return false;
}
EXPORT_SYMBOL(drm_sched_dependency_optimized);
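
/*
 * Example usage (hedged sketch): a driver's dependency handling can consult
 * this helper to skip an explicit hardware sync when the scheduler already
 * orders both jobs on the same ring. foo_emit_pipeline_sync() is an assumed
 * driver helper, not part of this API.
 *
 *	if (drm_sched_dependency_optimized(fence, entity))
 *		return 0;
 *	return foo_emit_pipeline_sync(ring, fence);
 */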

static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = entity->sched;
	struct dma_fence *fence = entity->dependency;
	struct drm_sched_fence *s_fence;

	if (fence->context == entity->fence_context) {
		/* We can ignore fences from ourself */
		dma_fence_put(entity->dependency);
		return false;
	}

	s_fence = to_drm_sched_fence(fence);
	if (s_fence && s_fence->sched == sched) {
		/*
		 * Fence is from the same scheduler, only need to wait for
		 * it to be scheduled
		 */
		fence = dma_fence_get(&s_fence->scheduled);
		dma_fence_put(entity->dependency);
		entity->dependency = fence;
		if (!dma_fence_add_callback(fence, &entity->cb,
					    drm_sched_entity_clear_dep))
			return true;

		/* Ignore it when it is already scheduled */
		dma_fence_put(fence);
		return false;
	}

	if (!dma_fence_add_callback(entity->dependency, &entity->cb,
				    drm_sched_entity_wakeup))
		return true;

	dma_fence_put(entity->dependency);
	return false;
}

static struct drm_sched_job *
drm_sched_entity_pop_job(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = entity->sched;
	struct drm_sched_job *sched_job = to_drm_sched_job(
					spsc_queue_peek(&entity->job_queue));

	if (!sched_job)
		return NULL;

	while ((entity->dependency = sched->ops->dependency(sched_job, entity)))
		if (drm_sched_entity_add_dependency_cb(entity))
			return NULL;

	/* Skip jobs from an entity that was marked guilty */
	if (entity->guilty && atomic_read(entity->guilty))
		dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED);

	spsc_queue_pop(&entity->job_queue);
	return sched_job;
}
366
Jammy Zhoua72ce6f2015-05-22 18:55:07 +0800367/**
Andrey Grodzovsky83f4b112017-10-12 16:46:26 -0400368 * Submit a job to the job queue
Jammy Zhoua72ce6f2015-05-22 18:55:07 +0800369 *
Junwei Zhang4c7eb912015-09-09 09:05:55 +0800370 * @sched_job The pointer to job required to submit
Christian König6c859272015-08-20 16:12:50 +0200371 *
Andrey Grodzovsky83f4b112017-10-12 16:46:26 -0400372 * Returns 0 for success, negative error code otherwise.
Christian König6c859272015-08-20 16:12:50 +0200373 */
Lucas Stach1b1f42d2017-12-06 17:49:39 +0100374void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
375 struct drm_sched_entity *entity)
Jammy Zhoua72ce6f2015-05-22 18:55:07 +0800376{
Lucas Stach1b1f42d2017-12-06 17:49:39 +0100377 struct drm_gpu_scheduler *sched = sched_job->sched;
Andrey Grodzovsky83f4b112017-10-12 16:46:26 -0400378 bool first = false;
379
Lucas Stach1b1f42d2017-12-06 17:49:39 +0100380 trace_drm_sched_job(sched_job, entity);
Christian König6c859272015-08-20 16:12:50 +0200381
382 spin_lock(&entity->queue_lock);
Andrey Grodzovsky83f4b112017-10-12 16:46:26 -0400383 first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);
Christian König6c859272015-08-20 16:12:50 +0200384
385 spin_unlock(&entity->queue_lock);
386
387 /* first job wakes up scheduler */
Chunming Zhoue8deea22015-12-11 18:22:52 +0800388 if (first) {
389 /* Add the entity to the run queue */
Andres Rodriguez9ebbaab2017-06-02 15:09:00 -0400390 spin_lock(&entity->rq_lock);
Lucas Stach1b1f42d2017-12-06 17:49:39 +0100391 drm_sched_rq_add_entity(entity->rq, entity);
Andres Rodriguez9ebbaab2017-06-02 15:09:00 -0400392 spin_unlock(&entity->rq_lock);
Lucas Stach1b1f42d2017-12-06 17:49:39 +0100393 drm_sched_wakeup(sched);
Chunming Zhoue8deea22015-12-11 18:22:52 +0800394 }
Christian König6c859272015-08-20 16:12:50 +0200395}
Lucas Stach1b1f42d2017-12-06 17:49:39 +0100396EXPORT_SYMBOL(drm_sched_entity_push_job);
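
/*
 * Once pushed, the job is owned by the scheduler: it can be run and then
 * released through ops->free_job() at any time, so the caller must not
 * touch @sched_job again and should grab any fence references it needs
 * (e.g. to s_fence->finished) before calling drm_sched_entity_push_job().
 */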

/* drm_sched_job_finish is called after the hw fence has signaled */
static void drm_sched_job_finish(struct work_struct *work)
{
	struct drm_sched_job *s_job = container_of(work, struct drm_sched_job,
						   finish_work);
	struct drm_gpu_scheduler *sched = s_job->sched;

	/* remove job from ring_mirror_list */
	spin_lock(&sched->job_list_lock);
	list_del_init(&s_job->node);
	if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
		struct drm_sched_job *next;

		spin_unlock(&sched->job_list_lock);
		cancel_delayed_work_sync(&s_job->work_tdr);
		spin_lock(&sched->job_list_lock);

		/* queue TDR for next job */
		next = list_first_entry_or_null(&sched->ring_mirror_list,
						struct drm_sched_job, node);

		if (next)
			schedule_delayed_work(&next->work_tdr, sched->timeout);
	}
	spin_unlock(&sched->job_list_lock);
	dma_fence_put(&s_job->s_fence->finished);
	sched->ops->free_job(s_job);
}

static void drm_sched_job_finish_cb(struct dma_fence *f,
				    struct dma_fence_cb *cb)
{
	struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
						 finish_cb);
	schedule_work(&job->finish_work);
}

static void drm_sched_job_begin(struct drm_sched_job *s_job)
{
	struct drm_gpu_scheduler *sched = s_job->sched;

	dma_fence_add_callback(&s_job->s_fence->finished, &s_job->finish_cb,
			       drm_sched_job_finish_cb);

	spin_lock(&sched->job_list_lock);
	list_add_tail(&s_job->node, &sched->ring_mirror_list);
	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
	    list_first_entry_or_null(&sched->ring_mirror_list,
				     struct drm_sched_job, node) == s_job)
		schedule_delayed_work(&s_job->work_tdr, sched->timeout);
	spin_unlock(&sched->job_list_lock);
}

static void drm_sched_job_timedout(struct work_struct *work)
{
	struct drm_sched_job *job = container_of(work, struct drm_sched_job,
						 work_tdr.work);

	job->sched->ops->timedout_job(job);
}

void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
{
	struct drm_sched_job *s_job;
	struct drm_sched_entity *entity, *tmp;
	int i;

	spin_lock(&sched->job_list_lock);
	list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) {
		if (s_job->s_fence->parent &&
		    dma_fence_remove_callback(s_job->s_fence->parent,
					      &s_job->s_fence->cb)) {
			dma_fence_put(s_job->s_fence->parent);
			s_job->s_fence->parent = NULL;
			atomic_dec(&sched->hw_rq_count);
		}
	}
	spin_unlock(&sched->job_list_lock);

	if (bad && bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
		atomic_inc(&bad->karma);
		/* Don't increase @bad's karma if it's from the KERNEL RQ,
		 * because a GPU hang can corrupt kernel jobs (like VM
		 * updates), but kernel jobs are always considered good.
		 */
		for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_KERNEL; i++) {
			struct drm_sched_rq *rq = &sched->sched_rq[i];

			spin_lock(&rq->lock);
			list_for_each_entry_safe(entity, tmp, &rq->entities, list) {
				if (bad->s_fence->scheduled.context == entity->fence_context) {
					if (atomic_read(&bad->karma) > bad->sched->hang_limit)
						if (entity->guilty)
							atomic_set(entity->guilty, 1);
					break;
				}
			}
			spin_unlock(&rq->lock);
			if (&entity->list != &rq->entities)
				break;
		}
	}
}
EXPORT_SYMBOL(drm_sched_hw_job_reset);

void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_job *s_job, *tmp;
	bool found_guilty = false;
	int r;

	spin_lock(&sched->job_list_lock);
	s_job = list_first_entry_or_null(&sched->ring_mirror_list,
					 struct drm_sched_job, node);
	if (s_job && sched->timeout != MAX_SCHEDULE_TIMEOUT)
		schedule_delayed_work(&s_job->work_tdr, sched->timeout);

	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
		struct drm_sched_fence *s_fence = s_job->s_fence;
		struct dma_fence *fence;
		uint64_t guilty_context;

		if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
			found_guilty = true;
			guilty_context = s_job->s_fence->scheduled.context;
		}

		if (found_guilty && s_job->s_fence->scheduled.context == guilty_context)
			dma_fence_set_error(&s_fence->finished, -ECANCELED);

		spin_unlock(&sched->job_list_lock);
		fence = sched->ops->run_job(s_job);
		atomic_inc(&sched->hw_rq_count);
		if (fence) {
			s_fence->parent = dma_fence_get(fence);
			r = dma_fence_add_callback(fence, &s_fence->cb,
						   drm_sched_process_job);
			if (r == -ENOENT)
				drm_sched_process_job(fence, &s_fence->cb);
			else if (r)
				DRM_ERROR("fence add callback failed (%d)\n",
					  r);
			dma_fence_put(fence);
		} else {
			drm_sched_process_job(NULL, &s_fence->cb);
		}
		spin_lock(&sched->job_list_lock);
	}
	spin_unlock(&sched->job_list_lock);
}
EXPORT_SYMBOL(drm_sched_job_recovery);
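
/*
 * Example recovery sequence (a hedged sketch of how a driver might pair
 * drm_sched_hw_job_reset() with drm_sched_job_recovery() after a hang,
 * e.g. from its ->timedout_job() path; foo_hw_reset() is an assumed
 * driver-specific helper):
 *
 *	kthread_park(sched->thread);		stop the scheduler thread
 *	drm_sched_hw_job_reset(sched, bad);	detach HW fences, bump karma
 *	foo_hw_reset(fdev);			actually reset the GPU
 *	drm_sched_job_recovery(sched);		resubmit the mirror list
 *	kthread_unpark(sched->thread);
 */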

/* Initialize a sched_job with its basic fields */
int drm_sched_job_init(struct drm_sched_job *job,
		       struct drm_gpu_scheduler *sched,
		       struct drm_sched_entity *entity,
		       void *owner)
{
	job->sched = sched;
	job->s_priority = entity->rq - sched->sched_rq;
	job->s_fence = drm_sched_fence_create(entity, owner);
	if (!job->s_fence)
		return -ENOMEM;
	job->id = atomic64_inc_return(&sched->job_id_count);

	INIT_WORK(&job->finish_work, drm_sched_job_finish);
	INIT_LIST_HEAD(&job->node);
	INIT_DELAYED_WORK(&job->work_tdr, drm_sched_job_timedout);

	return 0;
}
EXPORT_SYMBOL(drm_sched_job_init);
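
/*
 * Example submission path (hedged sketch): a hypothetical driver whose
 * foo_job embeds a drm_sched_job as member "base"; foo_install_fence() is
 * an assumed helper that hands the finished fence to userspace.
 *
 *	static int foo_submit(struct foo_ctx *ctx, struct foo_job *job)
 *	{
 *		struct drm_gpu_scheduler *sched = ctx->entity.sched;
 *		int r;
 *
 *		r = drm_sched_job_init(&job->base, sched, &ctx->entity, ctx);
 *		if (r)
 *			return r;
 *
 *		foo_install_fence(job, &job->base.s_fence->finished);
 *
 *		drm_sched_entity_push_job(&job->base, &ctx->entity);
 *		return 0;
 *	}
 *
 * Taking the finished-fence reference before the push matters because the
 * job may be freed as soon as it is pushed (see drm_sched_entity_push_job()).
 */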

/**
 * Return true if we can push more jobs to the hw.
 */
static bool drm_sched_ready(struct drm_gpu_scheduler *sched)
{
	return atomic_read(&sched->hw_rq_count) <
		sched->hw_submission_limit;
}

/**
 * Wake up the scheduler when it is ready
 */
static void drm_sched_wakeup(struct drm_gpu_scheduler *sched)
{
	if (drm_sched_ready(sched))
		wake_up_interruptible(&sched->wake_up_worker);
}

/**
 * Select the next entity to process
 */
static struct drm_sched_entity *
drm_sched_select_entity(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_entity *entity;
	int i;

	if (!drm_sched_ready(sched))
		return NULL;

	/* Kernel run queue has higher priority than normal run queue */
	for (i = DRM_SCHED_PRIORITY_MAX - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
		entity = drm_sched_rq_select_entity(&sched->sched_rq[i]);
		if (entity)
			break;
	}

	return entity;
}

static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct drm_sched_fence *s_fence =
		container_of(cb, struct drm_sched_fence, cb);
	struct drm_gpu_scheduler *sched = s_fence->sched;

	dma_fence_get(&s_fence->finished);
	atomic_dec(&sched->hw_rq_count);
	drm_sched_fence_finished(s_fence);

	trace_drm_sched_process_job(s_fence);
	dma_fence_put(&s_fence->finished);
	wake_up_interruptible(&sched->wake_up_worker);
}

static bool drm_sched_blocked(struct drm_gpu_scheduler *sched)
{
	if (kthread_should_park()) {
		kthread_parkme();
		return true;
	}

	return false;
}

static int drm_sched_main(void *param)
{
	struct sched_param sparam = {.sched_priority = 1};
	struct drm_gpu_scheduler *sched = (struct drm_gpu_scheduler *)param;
	int r;

	sched_setscheduler(current, SCHED_FIFO, &sparam);

	while (!kthread_should_stop()) {
		struct drm_sched_entity *entity = NULL;
		struct drm_sched_fence *s_fence;
		struct drm_sched_job *sched_job;
		struct dma_fence *fence;

		wait_event_interruptible(sched->wake_up_worker,
					 (!drm_sched_blocked(sched) &&
					  (entity = drm_sched_select_entity(sched))) ||
					 kthread_should_stop());

		if (!entity)
			continue;

		sched_job = drm_sched_entity_pop_job(entity);
		if (!sched_job)
			continue;

		s_fence = sched_job->s_fence;

		atomic_inc(&sched->hw_rq_count);
		drm_sched_job_begin(sched_job);

		fence = sched->ops->run_job(sched_job);
		drm_sched_fence_scheduled(s_fence);

		if (fence) {
			s_fence->parent = dma_fence_get(fence);
			r = dma_fence_add_callback(fence, &s_fence->cb,
						   drm_sched_process_job);
			if (r == -ENOENT)
				drm_sched_process_job(fence, &s_fence->cb);
			else if (r)
				DRM_ERROR("fence add callback failed (%d)\n",
					  r);
			dma_fence_put(fence);
		} else {
			drm_sched_process_job(NULL, &s_fence->cb);
		}

		wake_up(&sched->job_scheduled);
	}
	return 0;
}

/**
 * Init a gpu scheduler instance
 *
 * @sched The pointer to the scheduler
 * @ops The backend operations for this scheduler.
 * @hw_submission Number of hw submissions that can be in flight.
 * @hang_limit Number of hangs a job may cause before its entity is
 *             marked guilty.
 * @timeout Timeout in jiffies after which a job is considered hung, or
 *          MAX_SCHEDULE_TIMEOUT to disable timeout handling.
 * @name Name used for debugging
 *
 * Return 0 on success, otherwise error code.
 */
int drm_sched_init(struct drm_gpu_scheduler *sched,
		   const struct drm_sched_backend_ops *ops,
		   unsigned hw_submission,
		   unsigned hang_limit,
		   long timeout,
		   const char *name)
{
	int i;

	sched->ops = ops;
	sched->hw_submission_limit = hw_submission;
	sched->name = name;
	sched->timeout = timeout;
	sched->hang_limit = hang_limit;
	for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_MAX; i++)
		drm_sched_rq_init(&sched->sched_rq[i]);

	init_waitqueue_head(&sched->wake_up_worker);
	init_waitqueue_head(&sched->job_scheduled);
	INIT_LIST_HEAD(&sched->ring_mirror_list);
	spin_lock_init(&sched->job_list_lock);
	atomic_set(&sched->hw_rq_count, 0);
	atomic64_set(&sched->job_id_count, 0);

	/* Each scheduler will run on a separate kernel thread */
	sched->thread = kthread_run(drm_sched_main, sched, sched->name);
	if (IS_ERR(sched->thread)) {
		DRM_ERROR("Failed to create scheduler for %s.\n", name);
		return PTR_ERR(sched->thread);
	}

	return 0;
}
EXPORT_SYMBOL(drm_sched_init);
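
/*
 * Example setup (hedged sketch): wiring one scheduler instance to a single
 * hardware ring. The four hooks are the real drm_sched_backend_ops members;
 * the foo_* implementations and the numbers are assumptions for
 * illustration.
 *
 *	static const struct drm_sched_backend_ops foo_sched_ops = {
 *		.dependency = foo_job_dependency,
 *		.run_job = foo_job_run,
 *		.timedout_job = foo_job_timedout,
 *		.free_job = foo_job_free,
 *	};
 *
 *	r = drm_sched_init(&fdev->sched, &foo_sched_ops,
 *			   4, 2, msecs_to_jiffies(500), "foo_ring0");
 *	if (r)
 *		return r;
 *
 * Pass MAX_SCHEDULE_TIMEOUT as @timeout to disable the TDR handler.
 */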

/**
 * Destroy a gpu scheduler
 *
 * @sched The pointer to the scheduler
 */
void drm_sched_fini(struct drm_gpu_scheduler *sched)
{
	if (sched->thread)
		kthread_stop(sched->thread);
}
EXPORT_SYMBOL(drm_sched_fini);