/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <drm/drmP.h>
#include <drm/gpu_scheduler.h>
#include <drm/spsc_queue.h>

#define CREATE_TRACE_POINTS
#include "gpu_scheduler_trace.h"

#define to_drm_sched_job(sched_job)		\
		container_of((sched_job), struct drm_sched_job, queue_node)

static bool drm_sched_entity_is_ready(struct drm_sched_entity *entity);
static void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);

/* Initialize a given run queue struct */
static void drm_sched_rq_init(struct drm_sched_rq *rq)
{
	spin_lock_init(&rq->lock);
	INIT_LIST_HEAD(&rq->entities);
	rq->current_entity = NULL;
}

static void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
				    struct drm_sched_entity *entity)
{
	if (!list_empty(&entity->list))
		return;
	spin_lock(&rq->lock);
	list_add_tail(&entity->list, &rq->entities);
	spin_unlock(&rq->lock);
}

static void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
				       struct drm_sched_entity *entity)
{
	if (list_empty(&entity->list))
		return;
	spin_lock(&rq->lock);
	list_del_init(&entity->list);
	if (rq->current_entity == entity)
		rq->current_entity = NULL;
	spin_unlock(&rq->lock);
}

/**
 * Select an entity which could provide a job to run
 *
 * @rq		The run queue to check.
 *
 * Try to find a ready entity, returns NULL if none found.
 */
static struct drm_sched_entity *
drm_sched_rq_select_entity(struct drm_sched_rq *rq)
{
	struct drm_sched_entity *entity;

	spin_lock(&rq->lock);

	entity = rq->current_entity;
	if (entity) {
		list_for_each_entry_continue(entity, &rq->entities, list) {
			if (drm_sched_entity_is_ready(entity)) {
				rq->current_entity = entity;
				spin_unlock(&rq->lock);
				return entity;
			}
		}
	}

	list_for_each_entry(entity, &rq->entities, list) {

		if (drm_sched_entity_is_ready(entity)) {
			rq->current_entity = entity;
			spin_unlock(&rq->lock);
			return entity;
		}

		if (entity == rq->current_entity)
			break;
	}

	spin_unlock(&rq->lock);

	return NULL;
}

/**
 * Init a context entity used by the scheduler when submitting to a HW ring.
 *
 * @sched	The pointer to the scheduler
 * @entity	The pointer to a valid drm_sched_entity
 * @rq		The run queue this entity belongs to
 * @jobs	The max number of jobs in the job queue
 * @guilty	atomic_t set to 1 when a job on this queue
 *		is found to be guilty causing a timeout
 *
 * Return 0 on success, negative error code on failure.
 */
int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
			  struct drm_sched_entity *entity,
			  struct drm_sched_rq *rq,
			  uint32_t jobs, atomic_t *guilty)
{
	if (!(sched && entity && rq))
		return -EINVAL;

	memset(entity, 0, sizeof(struct drm_sched_entity));
	INIT_LIST_HEAD(&entity->list);
	entity->rq = rq;
	entity->sched = sched;
	entity->guilty = guilty;

	spin_lock_init(&entity->rq_lock);
	spin_lock_init(&entity->queue_lock);
	spsc_queue_init(&entity->job_queue);

	atomic_set(&entity->fence_seq, 0);
	entity->fence_context = dma_fence_context_alloc(2);

	return 0;
}
EXPORT_SYMBOL(drm_sched_entity_init);

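/*
 * Example: typical driver-side entity setup (an illustrative sketch only;
 * struct my_ctx, my_ctx_init() and the queue size of 32 are hypothetical
 * driver choices, and the run queue index assumes the
 * DRM_SCHED_PRIORITY_NORMAL level declared in gpu_scheduler.h):
 *
 *	struct my_ctx {
 *		struct drm_sched_entity entity;
 *	};
 *
 *	static int my_ctx_init(struct drm_gpu_scheduler *sched,
 *			       struct my_ctx *ctx)
 *	{
 *		struct drm_sched_rq *rq =
 *			&sched->sched_rq[DRM_SCHED_PRIORITY_NORMAL];
 *
 *		return drm_sched_entity_init(sched, &ctx->entity, rq, 32, NULL);
 *	}
 *
 * The matching teardown calls drm_sched_entity_fini(sched, &ctx->entity)
 * once the context is no longer used.
 */
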
/**
 * Query if entity is initialized
 *
 * @sched	Pointer to scheduler instance
 * @entity	The pointer to a valid scheduler entity
 *
 * Return true if entity is initialized, false otherwise.
 */
static bool drm_sched_entity_is_initialized(struct drm_gpu_scheduler *sched,
					    struct drm_sched_entity *entity)
{
	return entity->sched == sched &&
		entity->rq != NULL;
}

/**
 * Check if entity is idle
 *
 * @entity	The pointer to a valid scheduler entity
 *
 * Return true if the entity doesn't have any unscheduled jobs.
 */
static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
{
	rmb();
	if (spsc_queue_peek(&entity->job_queue) == NULL)
		return true;

	return false;
}

/**
 * Check if entity is ready
 *
 * @entity	The pointer to a valid scheduler entity
 *
 * Return true if entity could provide a job.
 */
static bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)
{
	if (spsc_queue_peek(&entity->job_queue) == NULL)
		return false;

	if (READ_ONCE(entity->dependency))
		return false;

	return true;
}

/**
 * Destroy a context entity
 *
 * @sched	Pointer to scheduler instance
 * @entity	The pointer to a valid scheduler entity
 *
 * Cleanup and free the allocated resources.
 */
void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
			   struct drm_sched_entity *entity)
{
	int r;

	if (!drm_sched_entity_is_initialized(sched, entity))
		return;
	/**
	 * The client will not queue more IBs during this fini; consume
	 * existing queued IBs or discard them on SIGKILL.
	 */
	if ((current->flags & PF_SIGNALED) && current->exit_code == SIGKILL)
		r = -ERESTARTSYS;
	else
		r = wait_event_killable(sched->job_scheduled,
					drm_sched_entity_is_idle(entity));
	drm_sched_entity_set_rq(entity, NULL);
	if (r) {
		struct drm_sched_job *job;

		/* Park the kernel thread for a moment to make sure it isn't
		 * processing our entity.
		 */
		kthread_park(sched->thread);
		kthread_unpark(sched->thread);
		if (entity->dependency) {
			dma_fence_remove_callback(entity->dependency,
						  &entity->cb);
			dma_fence_put(entity->dependency);
			entity->dependency = NULL;
		}

		while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) {
			struct drm_sched_fence *s_fence = job->s_fence;
			drm_sched_fence_scheduled(s_fence);
			dma_fence_set_error(&s_fence->finished, -ESRCH);
			drm_sched_fence_finished(s_fence);
			WARN_ON(s_fence->parent);
			dma_fence_put(&s_fence->finished);
			sched->ops->free_job(job);
		}
	}
}
EXPORT_SYMBOL(drm_sched_entity_fini);

static void drm_sched_entity_wakeup(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct drm_sched_entity *entity =
		container_of(cb, struct drm_sched_entity, cb);
	entity->dependency = NULL;
	dma_fence_put(f);
	drm_sched_wakeup(entity->sched);
}

static void drm_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct drm_sched_entity *entity =
		container_of(cb, struct drm_sched_entity, cb);
	entity->dependency = NULL;
	dma_fence_put(f);
}

void drm_sched_entity_set_rq(struct drm_sched_entity *entity,
			     struct drm_sched_rq *rq)
{
	if (entity->rq == rq)
		return;

	spin_lock(&entity->rq_lock);

	if (entity->rq)
		drm_sched_rq_remove_entity(entity->rq, entity);

	entity->rq = rq;
	if (rq)
		drm_sched_rq_add_entity(rq, entity);

	spin_unlock(&entity->rq_lock);
}
EXPORT_SYMBOL(drm_sched_entity_set_rq);

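/*
 * Example: a driver can change an entity's priority at runtime by moving
 * it to a different run queue (an illustrative sketch; my_entity_set_priority()
 * is a hypothetical driver helper, enum drm_sched_priority comes from
 * gpu_scheduler.h):
 *
 *	static void my_entity_set_priority(struct drm_sched_entity *entity,
 *					   enum drm_sched_priority prio)
 *	{
 *		struct drm_gpu_scheduler *sched = entity->sched;
 *
 *		drm_sched_entity_set_rq(entity, &sched->sched_rq[prio]);
 *	}
 *
 * Passing a NULL rq, as drm_sched_entity_fini() does above, detaches the
 * entity from scheduling entirely.
 */
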
bool drm_sched_dependency_optimized(struct dma_fence *fence,
				    struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = entity->sched;
	struct drm_sched_fence *s_fence;

	if (!fence || dma_fence_is_signaled(fence))
		return false;
	if (fence->context == entity->fence_context)
		return true;
	s_fence = to_drm_sched_fence(fence);
	if (s_fence && s_fence->sched == sched)
		return true;

	return false;
}
EXPORT_SYMBOL(drm_sched_dependency_optimized);

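/*
 * Example: a driver's ->dependency callback can use the helper above to
 * detect fences that the scheduler will already order correctly and skip
 * redundant synchronization work for them (an illustrative sketch; the
 * my_job_* helpers are hypothetical):
 *
 *	static struct dma_fence *
 *	my_dependency(struct drm_sched_job *sched_job,
 *		      struct drm_sched_entity *entity)
 *	{
 *		struct dma_fence *fence = my_job_next_fence(sched_job);
 *
 *		if (fence && drm_sched_dependency_optimized(fence, entity))
 *			my_job_skip_pipeline_sync(sched_job);
 *
 *		return fence;
 *	}
 */
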
static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = entity->sched;
	struct dma_fence *fence = entity->dependency;
	struct drm_sched_fence *s_fence;

	if (fence->context == entity->fence_context) {
		/* We can ignore fences from ourselves */
		dma_fence_put(entity->dependency);
		return false;
	}

	s_fence = to_drm_sched_fence(fence);
	if (s_fence && s_fence->sched == sched) {

		/*
		 * Fence is from the same scheduler, only need to wait for
		 * it to be scheduled
		 */
		fence = dma_fence_get(&s_fence->scheduled);
		dma_fence_put(entity->dependency);
		entity->dependency = fence;
		if (!dma_fence_add_callback(fence, &entity->cb,
					    drm_sched_entity_clear_dep))
			return true;

		/* Ignore it when it is already scheduled */
		dma_fence_put(fence);
		return false;
	}

	if (!dma_fence_add_callback(entity->dependency, &entity->cb,
				    drm_sched_entity_wakeup))
		return true;

	dma_fence_put(entity->dependency);
	return false;
}

static struct drm_sched_job *
drm_sched_entity_pop_job(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = entity->sched;
	struct drm_sched_job *sched_job = to_drm_sched_job(
						spsc_queue_peek(&entity->job_queue));

	if (!sched_job)
		return NULL;

	while ((entity->dependency = sched->ops->dependency(sched_job, entity)))
		if (drm_sched_entity_add_dependency_cb(entity))
			return NULL;

	/* skip jobs from an entity that was marked guilty */
	if (entity->guilty && atomic_read(entity->guilty))
		dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED);

	spsc_queue_pop(&entity->job_queue);
	return sched_job;
}

/**
 * Submit a job to the job queue
 *
 * @sched_job	The pointer to the job to submit
 * @entity	The entity the job is pushed to
 *
 * Queues the job on the entity and wakes up the scheduler if this was the
 * first job on an otherwise idle entity.
 */
void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
			       struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = sched_job->sched;
	bool first = false;

	trace_drm_sched_job(sched_job, entity);

	spin_lock(&entity->queue_lock);
	first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);

	spin_unlock(&entity->queue_lock);

	/* first job wakes up scheduler */
	if (first) {
		/* Add the entity to the run queue */
		spin_lock(&entity->rq_lock);
		drm_sched_rq_add_entity(entity->rq, entity);
		spin_unlock(&entity->rq_lock);
		drm_sched_wakeup(sched);
	}
}
EXPORT_SYMBOL(drm_sched_entity_push_job);

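/*
 * Example: the usual submission path pairs drm_sched_job_init() (defined
 * further down) with drm_sched_entity_push_job() (an illustrative sketch;
 * struct my_job and my_submit() are hypothetical driver names, where
 * struct my_job embeds a struct drm_sched_job member called base):
 *
 *	static int my_submit(struct drm_gpu_scheduler *sched,
 *			     struct drm_sched_entity *entity,
 *			     struct my_job *job, void *owner)
 *	{
 *		int r;
 *
 *		r = drm_sched_job_init(&job->base, sched, entity, owner);
 *		if (r)
 *			return r;
 *
 *		drm_sched_entity_push_job(&job->base, entity);
 *		return 0;
 *	}
 */
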
/* drm_sched_job_finish is called after the hw fence is signaled */
static void drm_sched_job_finish(struct work_struct *work)
{
	struct drm_sched_job *s_job = container_of(work, struct drm_sched_job,
						   finish_work);
	struct drm_gpu_scheduler *sched = s_job->sched;

	/* remove job from ring_mirror_list */
	spin_lock(&sched->job_list_lock);
	list_del_init(&s_job->node);
	if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
		struct drm_sched_job *next;

		spin_unlock(&sched->job_list_lock);
		cancel_delayed_work_sync(&s_job->work_tdr);
		spin_lock(&sched->job_list_lock);

		/* queue TDR for next job */
		next = list_first_entry_or_null(&sched->ring_mirror_list,
						struct drm_sched_job, node);

		if (next)
			schedule_delayed_work(&next->work_tdr, sched->timeout);
	}
	spin_unlock(&sched->job_list_lock);
	dma_fence_put(&s_job->s_fence->finished);
	sched->ops->free_job(s_job);
}

static void drm_sched_job_finish_cb(struct dma_fence *f,
				    struct dma_fence_cb *cb)
{
	struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
						 finish_cb);
	schedule_work(&job->finish_work);
}

static void drm_sched_job_begin(struct drm_sched_job *s_job)
{
	struct drm_gpu_scheduler *sched = s_job->sched;

	dma_fence_add_callback(&s_job->s_fence->finished, &s_job->finish_cb,
			       drm_sched_job_finish_cb);

	spin_lock(&sched->job_list_lock);
	list_add_tail(&s_job->node, &sched->ring_mirror_list);
	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
	    list_first_entry_or_null(&sched->ring_mirror_list,
				     struct drm_sched_job, node) == s_job)
		schedule_delayed_work(&s_job->work_tdr, sched->timeout);
	spin_unlock(&sched->job_list_lock);
}

static void drm_sched_job_timedout(struct work_struct *work)
{
	struct drm_sched_job *job = container_of(work, struct drm_sched_job,
						 work_tdr.work);

	job->sched->ops->timedout_job(job);
}

void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
{
	struct drm_sched_job *s_job;
	struct drm_sched_entity *entity, *tmp;
	int i;

	spin_lock(&sched->job_list_lock);
	list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) {
		if (s_job->s_fence->parent &&
		    dma_fence_remove_callback(s_job->s_fence->parent,
					      &s_job->s_fence->cb)) {
			dma_fence_put(s_job->s_fence->parent);
			s_job->s_fence->parent = NULL;
			atomic_dec(&sched->hw_rq_count);
		}
	}
	spin_unlock(&sched->job_list_lock);

	if (bad && bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
		atomic_inc(&bad->karma);
		/* don't increase @bad's karma if it's from the KERNEL RQ,
		 * because sometimes a GPU hang would cause kernel jobs (like
		 * VM updating jobs) to be corrupted, but kernel jobs are
		 * always considered good.
		 */
		for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_KERNEL; i++) {
			struct drm_sched_rq *rq = &sched->sched_rq[i];

			spin_lock(&rq->lock);
			list_for_each_entry_safe(entity, tmp, &rq->entities, list) {
				if (bad->s_fence->scheduled.context == entity->fence_context) {
					if (atomic_read(&bad->karma) > bad->sched->hang_limit)
						if (entity->guilty)
							atomic_set(entity->guilty, 1);
					break;
				}
			}
			spin_unlock(&rq->lock);
			if (&entity->list != &rq->entities)
				break;
		}
	}
}
EXPORT_SYMBOL(drm_sched_hw_job_reset);

void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_job *s_job, *tmp;
	bool found_guilty = false;
	int r;

	spin_lock(&sched->job_list_lock);
	s_job = list_first_entry_or_null(&sched->ring_mirror_list,
					 struct drm_sched_job, node);
	if (s_job && sched->timeout != MAX_SCHEDULE_TIMEOUT)
		schedule_delayed_work(&s_job->work_tdr, sched->timeout);

	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
		struct drm_sched_fence *s_fence = s_job->s_fence;
		struct dma_fence *fence;
		uint64_t guilty_context;

		if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
			found_guilty = true;
			guilty_context = s_job->s_fence->scheduled.context;
		}

		if (found_guilty && s_job->s_fence->scheduled.context == guilty_context)
			dma_fence_set_error(&s_fence->finished, -ECANCELED);

		spin_unlock(&sched->job_list_lock);
		fence = sched->ops->run_job(s_job);
		atomic_inc(&sched->hw_rq_count);
		if (fence) {
			s_fence->parent = dma_fence_get(fence);
			r = dma_fence_add_callback(fence, &s_fence->cb,
						   drm_sched_process_job);
			if (r == -ENOENT)
				drm_sched_process_job(fence, &s_fence->cb);
			else if (r)
				DRM_ERROR("fence add callback failed (%d)\n",
					  r);
			dma_fence_put(fence);
		} else {
			drm_sched_process_job(NULL, &s_fence->cb);
		}
		spin_lock(&sched->job_list_lock);
	}
	spin_unlock(&sched->job_list_lock);
}
EXPORT_SYMBOL(drm_sched_job_recovery);

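/*
 * Example: a driver's timeout handler typically brackets its hardware
 * reset with the two helpers above, parking the scheduler thread so no
 * new jobs are picked up while the ring is recovered (an illustrative
 * sketch; my_timedout_job() and my_hw_reset() are hypothetical driver
 * functions):
 *
 *	static void my_timedout_job(struct drm_sched_job *sched_job)
 *	{
 *		struct drm_gpu_scheduler *sched = sched_job->sched;
 *
 *		kthread_park(sched->thread);
 *		drm_sched_hw_job_reset(sched, sched_job);
 *		my_hw_reset();
 *		drm_sched_job_recovery(sched);
 *		kthread_unpark(sched->thread);
 *	}
 */
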
/* init a sched_job with basic fields */
int drm_sched_job_init(struct drm_sched_job *job,
		       struct drm_gpu_scheduler *sched,
		       struct drm_sched_entity *entity,
		       void *owner)
{
	job->sched = sched;
	job->s_priority = entity->rq - sched->sched_rq;
	job->s_fence = drm_sched_fence_create(entity, owner);
	if (!job->s_fence)
		return -ENOMEM;
	job->id = atomic64_inc_return(&sched->job_id_count);

	INIT_WORK(&job->finish_work, drm_sched_job_finish);
	INIT_LIST_HEAD(&job->node);
	INIT_DELAYED_WORK(&job->work_tdr, drm_sched_job_timedout);

	return 0;
}
EXPORT_SYMBOL(drm_sched_job_init);

/**
 * Return true if we can push more jobs to the hw.
 */
static bool drm_sched_ready(struct drm_gpu_scheduler *sched)
{
	return atomic_read(&sched->hw_rq_count) <
		sched->hw_submission_limit;
}

/**
 * Wake up the scheduler when it is ready
 */
static void drm_sched_wakeup(struct drm_gpu_scheduler *sched)
{
	if (drm_sched_ready(sched))
		wake_up_interruptible(&sched->wake_up_worker);
}

/**
 * Select next entity to process
 */
static struct drm_sched_entity *
drm_sched_select_entity(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_entity *entity;
	int i;

	if (!drm_sched_ready(sched))
		return NULL;

	/* Kernel run queue has higher priority than normal run queue */
	for (i = DRM_SCHED_PRIORITY_MAX - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
		entity = drm_sched_rq_select_entity(&sched->sched_rq[i]);
		if (entity)
			break;
	}

	return entity;
}

static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct drm_sched_fence *s_fence =
		container_of(cb, struct drm_sched_fence, cb);
	struct drm_gpu_scheduler *sched = s_fence->sched;

	dma_fence_get(&s_fence->finished);
	atomic_dec(&sched->hw_rq_count);
	drm_sched_fence_finished(s_fence);

	trace_drm_sched_process_job(s_fence);
	dma_fence_put(&s_fence->finished);
	wake_up_interruptible(&sched->wake_up_worker);
}

static bool drm_sched_blocked(struct drm_gpu_scheduler *sched)
{
	if (kthread_should_park()) {
		kthread_parkme();
		return true;
	}

	return false;
}

static int drm_sched_main(void *param)
{
	struct sched_param sparam = {.sched_priority = 1};
	struct drm_gpu_scheduler *sched = (struct drm_gpu_scheduler *)param;
	int r;

	sched_setscheduler(current, SCHED_FIFO, &sparam);

	while (!kthread_should_stop()) {
		struct drm_sched_entity *entity = NULL;
		struct drm_sched_fence *s_fence;
		struct drm_sched_job *sched_job;
		struct dma_fence *fence;

		wait_event_interruptible(sched->wake_up_worker,
					 (!drm_sched_blocked(sched) &&
					  (entity = drm_sched_select_entity(sched))) ||
					 kthread_should_stop());

		if (!entity)
			continue;

		sched_job = drm_sched_entity_pop_job(entity);
		if (!sched_job)
			continue;

		s_fence = sched_job->s_fence;

		atomic_inc(&sched->hw_rq_count);
		drm_sched_job_begin(sched_job);

		fence = sched->ops->run_job(sched_job);
		drm_sched_fence_scheduled(s_fence);

		if (fence) {
			s_fence->parent = dma_fence_get(fence);
			r = dma_fence_add_callback(fence, &s_fence->cb,
						   drm_sched_process_job);
			if (r == -ENOENT)
				drm_sched_process_job(fence, &s_fence->cb);
			else if (r)
				DRM_ERROR("fence add callback failed (%d)\n",
					  r);
			dma_fence_put(fence);
		} else {
			drm_sched_process_job(NULL, &s_fence->cb);
		}

		wake_up(&sched->job_scheduled);
	}
	return 0;
}

/**
 * Init a gpu scheduler instance
 *
 * @sched		The pointer to the scheduler
 * @ops			The backend operations for this scheduler.
 * @hw_submission	Number of hw submissions that can be in flight.
 * @hang_limit		Number of times a job is allowed to trigger a
 *			timeout before it is marked guilty.
 * @timeout		Timeout in jiffies for each submitted job, or
 *			MAX_SCHEDULE_TIMEOUT to disable timeout handling.
 * @name		Name used for debugging
 *
 * Return 0 on success, otherwise error code.
 */
int drm_sched_init(struct drm_gpu_scheduler *sched,
		   const struct drm_sched_backend_ops *ops,
		   unsigned hw_submission,
		   unsigned hang_limit,
		   long timeout,
		   const char *name)
{
	int i;
	sched->ops = ops;
	sched->hw_submission_limit = hw_submission;
	sched->name = name;
	sched->timeout = timeout;
	sched->hang_limit = hang_limit;
	for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_MAX; i++)
		drm_sched_rq_init(&sched->sched_rq[i]);

	init_waitqueue_head(&sched->wake_up_worker);
	init_waitqueue_head(&sched->job_scheduled);
	INIT_LIST_HEAD(&sched->ring_mirror_list);
	spin_lock_init(&sched->job_list_lock);
	atomic_set(&sched->hw_rq_count, 0);
	atomic64_set(&sched->job_id_count, 0);

	/* Each scheduler will run on a separate kernel thread */
	sched->thread = kthread_run(drm_sched_main, sched, sched->name);
	if (IS_ERR(sched->thread)) {
		DRM_ERROR("Failed to create scheduler for %s.\n", name);
		return PTR_ERR(sched->thread);
	}

	return 0;
}
EXPORT_SYMBOL(drm_sched_init);

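/*
 * Example: a driver usually brings up one scheduler instance per hardware
 * ring during device init (an illustrative sketch; my_sched_ops, the
 * my_* callbacks and the numeric limits are hypothetical driver choices):
 *
 *	static const struct drm_sched_backend_ops my_sched_ops = {
 *		.dependency = my_dependency,
 *		.run_job = my_run_job,
 *		.timedout_job = my_timedout_job,
 *		.free_job = my_free_job,
 *	};
 *
 *	r = drm_sched_init(&ring->sched, &my_sched_ops,
 *			   16, 1, msecs_to_jiffies(500),
 *			   ring->name);
 *
 * Here at most 16 hw submissions may be in flight at once, a job is
 * marked guilty once its karma exceeds the hang limit of 1, and each
 * job gets a 500 ms timeout.
 */
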
/**
 * Destroy a gpu scheduler
 *
 * @sched	The pointer to the scheduler
 */
void drm_sched_fini(struct drm_gpu_scheduler *sched)
{
	if (sched->thread)
		kthread_stop(sched->thread);
}
EXPORT_SYMBOL(drm_sched_fini);