/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <drm/drmP.h>
#include <drm/gpu_scheduler.h>
#include <drm/spsc_queue.h>

#define CREATE_TRACE_POINTS
#include "gpu_scheduler_trace.h"

#define to_drm_sched_job(sched_job)		\
		container_of((sched_job), struct drm_sched_job, queue_node)

static bool drm_sched_entity_is_ready(struct drm_sched_entity *entity);
static void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);

/* Initialize a given run queue struct */
static void drm_sched_rq_init(struct drm_sched_rq *rq)
{
	spin_lock_init(&rq->lock);
	INIT_LIST_HEAD(&rq->entities);
	rq->current_entity = NULL;
}

static void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
				    struct drm_sched_entity *entity)
{
	if (!list_empty(&entity->list))
		return;
	spin_lock(&rq->lock);
	list_add_tail(&entity->list, &rq->entities);
	spin_unlock(&rq->lock);
}

static void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
				       struct drm_sched_entity *entity)
{
	if (list_empty(&entity->list))
		return;
	spin_lock(&rq->lock);
	list_del_init(&entity->list);
	if (rq->current_entity == entity)
		rq->current_entity = NULL;
	spin_unlock(&rq->lock);
}

/**
 * Select an entity which could provide a job to run
 *
 * @rq The run queue to check.
 *
 * Try to find a ready entity, returns NULL if none found.
 */
static struct drm_sched_entity *
drm_sched_rq_select_entity(struct drm_sched_rq *rq)
{
	struct drm_sched_entity *entity;

	spin_lock(&rq->lock);

	entity = rq->current_entity;
	if (entity) {
		list_for_each_entry_continue(entity, &rq->entities, list) {
			if (drm_sched_entity_is_ready(entity)) {
				rq->current_entity = entity;
				spin_unlock(&rq->lock);
				return entity;
			}
		}
	}

	list_for_each_entry(entity, &rq->entities, list) {

		if (drm_sched_entity_is_ready(entity)) {
			rq->current_entity = entity;
			spin_unlock(&rq->lock);
			return entity;
		}

		if (entity == rq->current_entity)
			break;
	}

	spin_unlock(&rq->lock);

	return NULL;
}

/**
 * Initialize a context entity used by the scheduler when submitting to a
 * HW ring.
 *
 * @sched The pointer to the scheduler
 * @entity The pointer to a valid drm_sched_entity
 * @rq The run queue this entity belongs to
 * @jobs The max number of jobs in the job queue
 * @guilty atomic_t set to 1 when a job on this queue
 *	   is found to be guilty causing a timeout
 *
 * Return 0 on success, negative error code on failure.
 */
int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
			  struct drm_sched_entity *entity,
			  struct drm_sched_rq *rq,
			  uint32_t jobs, atomic_t *guilty)
{
	if (!(sched && entity && rq))
		return -EINVAL;

	memset(entity, 0, sizeof(struct drm_sched_entity));
	INIT_LIST_HEAD(&entity->list);
	entity->rq = rq;
	entity->sched = sched;
	entity->guilty = guilty;
	entity->fini_status = 0;
	entity->last_scheduled = NULL;

	spin_lock_init(&entity->rq_lock);
	spin_lock_init(&entity->queue_lock);
	spsc_queue_init(&entity->job_queue);

	atomic_set(&entity->fence_seq, 0);
	entity->fence_context = dma_fence_context_alloc(2);

	return 0;
}
EXPORT_SYMBOL(drm_sched_entity_init);
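
/*
 * Example (illustrative sketch, not part of this file): a driver would
 * typically embed one entity per context and ring and initialize it
 * against one of the scheduler's run queues. The names my_ctx,
 * my_ctx_init() and MY_MAX_JOBS below are hypothetical:
 *
 *	struct my_ctx {
 *		struct drm_sched_entity entity;
 *		atomic_t guilty;
 *	};
 *
 *	static int my_ctx_init(struct my_ctx *ctx, struct drm_gpu_scheduler *sched)
 *	{
 *		struct drm_sched_rq *rq = &sched->sched_rq[DRM_SCHED_PRIORITY_NORMAL];
 *
 *		return drm_sched_entity_init(sched, &ctx->entity, rq,
 *					     MY_MAX_JOBS, &ctx->guilty);
 *	}
 */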

/**
 * Query if entity is initialized
 *
 * @sched Pointer to scheduler instance
 * @entity The pointer to a valid scheduler entity
 *
 * Return true if entity is initialized, false otherwise.
 */
static bool drm_sched_entity_is_initialized(struct drm_gpu_scheduler *sched,
					    struct drm_sched_entity *entity)
{
	return entity->sched == sched &&
		entity->rq != NULL;
}

/**
 * Check if entity is idle
 *
 * @entity The pointer to a valid scheduler entity
 *
 * Return true if the entity doesn't have any unscheduled jobs.
 */
static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
{
	rmb();
	if (spsc_queue_peek(&entity->job_queue) == NULL)
		return true;

	return false;
}

/**
 * Check if entity is ready
 *
 * @entity The pointer to a valid scheduler entity
 *
 * Return true if entity could provide a job.
 */
static bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)
{
	if (spsc_queue_peek(&entity->job_queue) == NULL)
		return false;

	if (READ_ONCE(entity->dependency))
		return false;

	return true;
}

static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
					  struct dma_fence_cb *cb)
{
	struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
						 finish_cb);
	drm_sched_fence_finished(job->s_fence);
	WARN_ON(job->s_fence->parent);
	dma_fence_put(&job->s_fence->finished);
	job->sched->ops->free_job(job);
}


/**
 * Destroy a context entity
 *
 * @sched Pointer to scheduler instance
 * @entity The pointer to a valid scheduler entity
 *
 * drm_sched_entity_fini() is split into two functions; this first one does
 * the waiting, removes the entity from the runqueue and returns an error
 * when the process was killed.
 */
void drm_sched_entity_do_release(struct drm_gpu_scheduler *sched,
				 struct drm_sched_entity *entity)
{
	if (!drm_sched_entity_is_initialized(sched, entity))
		return;
	/**
	 * The client will not queue more IBs during this fini, consume existing
	 * queued IBs or discard them on SIGKILL
	 */
	if ((current->flags & PF_SIGNALED) && current->exit_code == SIGKILL)
		entity->fini_status = -ERESTARTSYS;
	else
		entity->fini_status = wait_event_killable(sched->job_scheduled,
					drm_sched_entity_is_idle(entity));
	drm_sched_entity_set_rq(entity, NULL);
}
EXPORT_SYMBOL(drm_sched_entity_do_release);

/**
 * Destroy a context entity
 *
 * @sched Pointer to scheduler instance
 * @entity The pointer to a valid scheduler entity
 *
 * This second function then goes over the entity and signals all jobs
 * with an error code.
 */
void drm_sched_entity_cleanup(struct drm_gpu_scheduler *sched,
			      struct drm_sched_entity *entity)
{
	if (entity->fini_status) {
		struct drm_sched_job *job;
		int r;

		/* Park the kernel thread for a moment to make sure it isn't
		 * processing our entity.
		 */
		kthread_park(sched->thread);
		kthread_unpark(sched->thread);
		if (entity->dependency) {
			dma_fence_remove_callback(entity->dependency,
						  &entity->cb);
			dma_fence_put(entity->dependency);
			entity->dependency = NULL;
		}

		while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) {
			struct drm_sched_fence *s_fence = job->s_fence;
			drm_sched_fence_scheduled(s_fence);
			dma_fence_set_error(&s_fence->finished, -ESRCH);
			r = dma_fence_add_callback(entity->last_scheduled, &job->finish_cb,
						   drm_sched_entity_kill_jobs_cb);
			if (r == -ENOENT)
				drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
			else if (r)
				DRM_ERROR("fence add callback failed (%d)\n", r);
		}
	}

	dma_fence_put(entity->last_scheduled);
	entity->last_scheduled = NULL;
}
EXPORT_SYMBOL(drm_sched_entity_cleanup);

void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
			   struct drm_sched_entity *entity)
{
	drm_sched_entity_do_release(sched, entity);
	drm_sched_entity_cleanup(sched, entity);
}
EXPORT_SYMBOL(drm_sched_entity_fini);
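
/*
 * Example (illustrative sketch, not part of this file): teardown mirrors
 * the init above. A driver that needs nothing between the two phases can
 * just call drm_sched_entity_fini(); one that must release resources while
 * the entity is still draining can split it. my_ctx_fini() is hypothetical:
 *
 *	static void my_ctx_fini(struct my_ctx *ctx, struct drm_gpu_scheduler *sched)
 *	{
 *		drm_sched_entity_do_release(sched, &ctx->entity);
 *		(driver-specific cleanup that must not race new job submission)
 *		drm_sched_entity_cleanup(sched, &ctx->entity);
 *	}
 */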

static void drm_sched_entity_wakeup(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct drm_sched_entity *entity =
		container_of(cb, struct drm_sched_entity, cb);
	entity->dependency = NULL;
	dma_fence_put(f);
	drm_sched_wakeup(entity->sched);
}

static void drm_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct drm_sched_entity *entity =
		container_of(cb, struct drm_sched_entity, cb);
	entity->dependency = NULL;
	dma_fence_put(f);
}

void drm_sched_entity_set_rq(struct drm_sched_entity *entity,
			     struct drm_sched_rq *rq)
{
	if (entity->rq == rq)
		return;

	spin_lock(&entity->rq_lock);

	if (entity->rq)
		drm_sched_rq_remove_entity(entity->rq, entity);

	entity->rq = rq;
	if (rq)
		drm_sched_rq_add_entity(rq, entity);

	spin_unlock(&entity->rq_lock);
}
EXPORT_SYMBOL(drm_sched_entity_set_rq);

bool drm_sched_dependency_optimized(struct dma_fence *fence,
				    struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = entity->sched;
	struct drm_sched_fence *s_fence;

	if (!fence || dma_fence_is_signaled(fence))
		return false;
	if (fence->context == entity->fence_context)
		return true;
	s_fence = to_drm_sched_fence(fence);
	if (s_fence && s_fence->sched == sched)
		return true;

	return false;
}
EXPORT_SYMBOL(drm_sched_dependency_optimized);
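
/*
 * Example (illustrative sketch, not part of this file): per the body above,
 * drm_sched_dependency_optimized() reports that a dependency fence comes
 * from the same entity or scheduler instance, where submission ordering
 * already covers it. A driver might use that to skip an explicit hardware
 * sync for such fences; the helper below is hypothetical:
 *
 *	static bool my_ring_needs_explicit_sync(struct dma_fence *dep,
 *						struct drm_sched_entity *entity)
 *	{
 *		return !drm_sched_dependency_optimized(dep, entity);
 *	}
 */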

static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = entity->sched;
	struct dma_fence *fence = entity->dependency;
	struct drm_sched_fence *s_fence;

	if (fence->context == entity->fence_context) {
		/* We can ignore fences from ourself */
		dma_fence_put(entity->dependency);
		return false;
	}

	s_fence = to_drm_sched_fence(fence);
	if (s_fence && s_fence->sched == sched) {

		/*
		 * Fence is from the same scheduler, only need to wait for
		 * it to be scheduled
		 */
		fence = dma_fence_get(&s_fence->scheduled);
		dma_fence_put(entity->dependency);
		entity->dependency = fence;
		if (!dma_fence_add_callback(fence, &entity->cb,
					    drm_sched_entity_clear_dep))
			return true;

		/* Ignore it when it is already scheduled */
		dma_fence_put(fence);
		return false;
	}

	if (!dma_fence_add_callback(entity->dependency, &entity->cb,
				    drm_sched_entity_wakeup))
		return true;

	dma_fence_put(entity->dependency);
	return false;
}

static struct drm_sched_job *
drm_sched_entity_pop_job(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = entity->sched;
	struct drm_sched_job *sched_job = to_drm_sched_job(
						spsc_queue_peek(&entity->job_queue));

	if (!sched_job)
		return NULL;

	while ((entity->dependency = sched->ops->dependency(sched_job, entity)))
		if (drm_sched_entity_add_dependency_cb(entity))
			return NULL;

	/* skip jobs from an entity that was marked guilty */
	if (entity->guilty && atomic_read(entity->guilty))
		dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED);

	dma_fence_put(entity->last_scheduled);
	entity->last_scheduled = dma_fence_get(&sched_job->s_fence->finished);

	spsc_queue_pop(&entity->job_queue);
	return sched_job;
}

/**
 * Submit a job to the job queue
 *
 * @sched_job The pointer to job required to submit
 *
 * Wakes up the scheduler if this is the first job queued on the entity.
 */
void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
			       struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = sched_job->sched;
	bool first = false;

	trace_drm_sched_job(sched_job, entity);

	spin_lock(&entity->queue_lock);
	first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);

	spin_unlock(&entity->queue_lock);

	/* first job wakes up scheduler */
	if (first) {
		/* Add the entity to the run queue */
		spin_lock(&entity->rq_lock);
		drm_sched_rq_add_entity(entity->rq, entity);
		spin_unlock(&entity->rq_lock);
		drm_sched_wakeup(sched);
	}
}
EXPORT_SYMBOL(drm_sched_entity_push_job);
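
/*
 * Example (illustrative sketch, not part of this file): the usual
 * submission flow pairs drm_sched_job_init() with
 * drm_sched_entity_push_job(). struct my_job and my_submit() are
 * hypothetical driver types:
 *
 *	static int my_submit(struct my_ctx *ctx, struct my_job *job, void *owner)
 *	{
 *		int r;
 *
 *		r = drm_sched_job_init(&job->base, ctx->entity.sched,
 *				       &ctx->entity, owner);
 *		if (r)
 *			return r;
 *
 *		drm_sched_entity_push_job(&job->base, &ctx->entity);
 *		return 0;
 *	}
 */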

/* job_finish is called after the hw fence is signaled */
static void drm_sched_job_finish(struct work_struct *work)
{
	struct drm_sched_job *s_job = container_of(work, struct drm_sched_job,
						   finish_work);
	struct drm_gpu_scheduler *sched = s_job->sched;

	/* remove job from ring_mirror_list */
	spin_lock(&sched->job_list_lock);
	list_del_init(&s_job->node);
	if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
		struct drm_sched_job *next;

		spin_unlock(&sched->job_list_lock);
		cancel_delayed_work_sync(&s_job->work_tdr);
		spin_lock(&sched->job_list_lock);

		/* queue TDR for next job */
		next = list_first_entry_or_null(&sched->ring_mirror_list,
						struct drm_sched_job, node);

		if (next)
			schedule_delayed_work(&next->work_tdr, sched->timeout);
	}
	spin_unlock(&sched->job_list_lock);
	dma_fence_put(&s_job->s_fence->finished);
	sched->ops->free_job(s_job);
}

static void drm_sched_job_finish_cb(struct dma_fence *f,
				    struct dma_fence_cb *cb)
{
	struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
						 finish_cb);
	schedule_work(&job->finish_work);
}

static void drm_sched_job_begin(struct drm_sched_job *s_job)
{
	struct drm_gpu_scheduler *sched = s_job->sched;

	dma_fence_add_callback(&s_job->s_fence->finished, &s_job->finish_cb,
			       drm_sched_job_finish_cb);

	spin_lock(&sched->job_list_lock);
	list_add_tail(&s_job->node, &sched->ring_mirror_list);
	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
	    list_first_entry_or_null(&sched->ring_mirror_list,
				     struct drm_sched_job, node) == s_job)
		schedule_delayed_work(&s_job->work_tdr, sched->timeout);
	spin_unlock(&sched->job_list_lock);
}

static void drm_sched_job_timedout(struct work_struct *work)
{
	struct drm_sched_job *job = container_of(work, struct drm_sched_job,
						 work_tdr.work);

	job->sched->ops->timedout_job(job);
}

void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
{
	struct drm_sched_job *s_job;
	struct drm_sched_entity *entity, *tmp;
	int i;

	spin_lock(&sched->job_list_lock);
	list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) {
		if (s_job->s_fence->parent &&
		    dma_fence_remove_callback(s_job->s_fence->parent,
					      &s_job->s_fence->cb)) {
			dma_fence_put(s_job->s_fence->parent);
			s_job->s_fence->parent = NULL;
			atomic_dec(&sched->hw_rq_count);
		}
	}
	spin_unlock(&sched->job_list_lock);

	if (bad && bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
		atomic_inc(&bad->karma);
		/* don't increase @bad's karma if it's from the KERNEL RQ,
		 * because sometimes a GPU hang would cause kernel jobs (like
		 * VM updating jobs) to be corrupted, but keep in mind that
		 * kernel jobs are always considered good.
		 */
		for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_KERNEL; i++) {
			struct drm_sched_rq *rq = &sched->sched_rq[i];

			spin_lock(&rq->lock);
			list_for_each_entry_safe(entity, tmp, &rq->entities, list) {
				if (bad->s_fence->scheduled.context == entity->fence_context) {
					if (atomic_read(&bad->karma) > bad->sched->hang_limit)
						if (entity->guilty)
							atomic_set(entity->guilty, 1);
					break;
				}
			}
			spin_unlock(&rq->lock);
			if (&entity->list != &rq->entities)
				break;
		}
	}
}
EXPORT_SYMBOL(drm_sched_hw_job_reset);

void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_job *s_job, *tmp;
	bool found_guilty = false;
	int r;

	spin_lock(&sched->job_list_lock);
	s_job = list_first_entry_or_null(&sched->ring_mirror_list,
					 struct drm_sched_job, node);
	if (s_job && sched->timeout != MAX_SCHEDULE_TIMEOUT)
		schedule_delayed_work(&s_job->work_tdr, sched->timeout);

	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
		struct drm_sched_fence *s_fence = s_job->s_fence;
		struct dma_fence *fence;
		uint64_t guilty_context;

		if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
			found_guilty = true;
			guilty_context = s_job->s_fence->scheduled.context;
		}

		if (found_guilty && s_job->s_fence->scheduled.context == guilty_context)
			dma_fence_set_error(&s_fence->finished, -ECANCELED);

		spin_unlock(&sched->job_list_lock);
		fence = sched->ops->run_job(s_job);
		atomic_inc(&sched->hw_rq_count);

		dma_fence_put(s_job->entity->last_scheduled);
		s_job->entity->last_scheduled = dma_fence_get(&s_fence->finished);

		if (fence) {
			s_fence->parent = dma_fence_get(fence);
			r = dma_fence_add_callback(fence, &s_fence->cb,
						   drm_sched_process_job);
			if (r == -ENOENT)
				drm_sched_process_job(fence, &s_fence->cb);
			else if (r)
				DRM_ERROR("fence add callback failed (%d)\n",
					  r);
			dma_fence_put(fence);
		} else {
			drm_sched_process_job(NULL, &s_fence->cb);
		}
		spin_lock(&sched->job_list_lock);
	}
	spin_unlock(&sched->job_list_lock);
}
EXPORT_SYMBOL(drm_sched_job_recovery);
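
/*
 * Example (illustrative sketch, not part of this file): a driver's GPU
 * reset path would typically bracket its hardware reset with the two
 * helpers above, with the scheduler thread parked around the sequence,
 * roughly in the style of amdgpu. my_hw_reset() is hypothetical:
 *
 *	static void my_gpu_recover(struct drm_gpu_scheduler *sched,
 *				   struct drm_sched_job *bad)
 *	{
 *		kthread_park(sched->thread);
 *		drm_sched_hw_job_reset(sched, bad);	(detach parent fences)
 *		my_hw_reset();				(driver-specific reset)
 *		drm_sched_job_recovery(sched);		(resubmit mirror list)
 *		kthread_unpark(sched->thread);
 *	}
 */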

/* init a sched_job with basic fields */
int drm_sched_job_init(struct drm_sched_job *job,
		       struct drm_gpu_scheduler *sched,
		       struct drm_sched_entity *entity,
		       void *owner)
{
	job->sched = sched;
	job->entity = entity;
	job->s_priority = entity->rq - sched->sched_rq;
	job->s_fence = drm_sched_fence_create(entity, owner);
	if (!job->s_fence)
		return -ENOMEM;
	job->id = atomic64_inc_return(&sched->job_id_count);

	INIT_WORK(&job->finish_work, drm_sched_job_finish);
	INIT_LIST_HEAD(&job->node);
	INIT_DELAYED_WORK(&job->work_tdr, drm_sched_job_timedout);

	return 0;
}
EXPORT_SYMBOL(drm_sched_job_init);

/**
 * Return true if we can push more jobs to the hw.
 */
static bool drm_sched_ready(struct drm_gpu_scheduler *sched)
{
	return atomic_read(&sched->hw_rq_count) <
		sched->hw_submission_limit;
}

/**
 * Wake up the scheduler when it is ready
 */
static void drm_sched_wakeup(struct drm_gpu_scheduler *sched)
{
	if (drm_sched_ready(sched))
		wake_up_interruptible(&sched->wake_up_worker);
}

/**
 * Select next entity to process
 */
static struct drm_sched_entity *
drm_sched_select_entity(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_entity *entity;
	int i;

	if (!drm_sched_ready(sched))
		return NULL;

	/* Kernel run queue has higher priority than normal run queue */
	for (i = DRM_SCHED_PRIORITY_MAX - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
		entity = drm_sched_rq_select_entity(&sched->sched_rq[i]);
		if (entity)
			break;
	}

	return entity;
}
661
Lucas Stach1b1f42d2017-12-06 17:49:39 +0100662static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
Christian König6f0e54a2015-08-05 21:22:10 +0200663{
Lucas Stach1b1f42d2017-12-06 17:49:39 +0100664 struct drm_sched_fence *s_fence =
665 container_of(cb, struct drm_sched_fence, cb);
666 struct drm_gpu_scheduler *sched = s_fence->sched;
Christian König6f0e54a2015-08-05 21:22:10 +0200667
Christian König7fd5e362017-10-13 10:58:15 +0200668 dma_fence_get(&s_fence->finished);
Christian Königc746ba22015-08-19 16:12:15 +0200669 atomic_dec(&sched->hw_rq_count);
Lucas Stach1b1f42d2017-12-06 17:49:39 +0100670 drm_sched_fence_finished(s_fence);
Monk Liucccd9bc2016-03-04 14:42:26 +0800671
Lucas Stach1b1f42d2017-12-06 17:49:39 +0100672 trace_drm_sched_process_job(s_fence);
Chris Wilsonf54d1862016-10-25 13:00:45 +0100673 dma_fence_put(&s_fence->finished);
Christian Königc2b6bd72015-08-25 21:39:31 +0200674 wake_up_interruptible(&sched->wake_up_worker);
Christian König6f0e54a2015-08-05 21:22:10 +0200675}
676
Lucas Stach1b1f42d2017-12-06 17:49:39 +0100677static bool drm_sched_blocked(struct drm_gpu_scheduler *sched)
Chunming Zhou0875dc92016-06-12 15:41:58 +0800678{
679 if (kthread_should_park()) {
680 kthread_parkme();
681 return true;
682 }
683
684 return false;
685}
686
Lucas Stach1b1f42d2017-12-06 17:49:39 +0100687static int drm_sched_main(void *param)
Jammy Zhoua72ce6f2015-05-22 18:55:07 +0800688{
Jammy Zhoua72ce6f2015-05-22 18:55:07 +0800689 struct sched_param sparam = {.sched_priority = 1};
Lucas Stach1b1f42d2017-12-06 17:49:39 +0100690 struct drm_gpu_scheduler *sched = (struct drm_gpu_scheduler *)param;
Andrey Grodzovsky83f4b112017-10-12 16:46:26 -0400691 int r;
Jammy Zhoua72ce6f2015-05-22 18:55:07 +0800692
693 sched_setscheduler(current, SCHED_FIFO, &sparam);
694
695 while (!kthread_should_stop()) {
Lucas Stach1b1f42d2017-12-06 17:49:39 +0100696 struct drm_sched_entity *entity = NULL;
697 struct drm_sched_fence *s_fence;
698 struct drm_sched_job *sched_job;
Chris Wilsonf54d1862016-10-25 13:00:45 +0100699 struct dma_fence *fence;
Christian König6f0e54a2015-08-05 21:22:10 +0200700
Christian Königc2b6bd72015-08-25 21:39:31 +0200701 wait_event_interruptible(sched->wake_up_worker,
Lucas Stach1b1f42d2017-12-06 17:49:39 +0100702 (!drm_sched_blocked(sched) &&
703 (entity = drm_sched_select_entity(sched))) ||
Chunming Zhou0875dc92016-06-12 15:41:58 +0800704 kthread_should_stop());
Christian Königf85a6dd2015-08-19 17:37:52 +0200705
Christian König3d651932015-11-12 21:10:35 +0100706 if (!entity)
707 continue;
708
Lucas Stach1b1f42d2017-12-06 17:49:39 +0100709 sched_job = drm_sched_entity_pop_job(entity);
Junwei Zhang4c7eb912015-09-09 09:05:55 +0800710 if (!sched_job)
Christian Königf85a6dd2015-08-19 17:37:52 +0200711 continue;
712
Junwei Zhang4c7eb912015-09-09 09:05:55 +0800713 s_fence = sched_job->s_fence;
Junwei Zhang2440ff22015-10-10 08:48:42 +0800714
Christian Königb034b572015-08-20 17:08:25 +0200715 atomic_inc(&sched->hw_rq_count);
Lucas Stach1b1f42d2017-12-06 17:49:39 +0100716 drm_sched_job_begin(sched_job);
Christian König7392c322016-05-18 13:00:38 +0200717
Junwei Zhang4c7eb912015-09-09 09:05:55 +0800718 fence = sched->ops->run_job(sched_job);
Lucas Stach1b1f42d2017-12-06 17:49:39 +0100719 drm_sched_fence_scheduled(s_fence);
Nicolai Hähnle29d25352017-09-28 11:51:32 +0200720
Christian König6f0e54a2015-08-05 21:22:10 +0200721 if (fence) {
Chris Wilsonf54d1862016-10-25 13:00:45 +0100722 s_fence->parent = dma_fence_get(fence);
723 r = dma_fence_add_callback(fence, &s_fence->cb,
Lucas Stach1b1f42d2017-12-06 17:49:39 +0100724 drm_sched_process_job);
Christian König6f0e54a2015-08-05 21:22:10 +0200725 if (r == -ENOENT)
Lucas Stach1b1f42d2017-12-06 17:49:39 +0100726 drm_sched_process_job(fence, &s_fence->cb);
Christian König6f0e54a2015-08-05 21:22:10 +0200727 else if (r)
Christian König16a71332016-05-18 09:43:07 +0200728 DRM_ERROR("fence add callback failed (%d)\n",
729 r);
Chris Wilsonf54d1862016-10-25 13:00:45 +0100730 dma_fence_put(fence);
Christian König27439fc2015-09-02 12:03:06 +0200731 } else {
Lucas Stach1b1f42d2017-12-06 17:49:39 +0100732 drm_sched_process_job(NULL, &s_fence->cb);
Christian König6f0e54a2015-08-05 21:22:10 +0200733 }
Christian Königaef48522015-08-20 14:47:46 +0200734
Christian Königc2b6bd72015-08-25 21:39:31 +0200735 wake_up(&sched->job_scheduled);
Jammy Zhoua72ce6f2015-05-22 18:55:07 +0800736 }
737 return 0;
738}

/**
 * Init a gpu scheduler instance
 *
 * @sched The pointer to the scheduler
 * @ops The backend operations for this scheduler.
 * @hw_submission Number of hw submissions that can be in flight.
 * @hang_limit Number of times a job may trigger a timeout before its
 *	       entity is marked guilty.
 * @timeout Timeout in jiffies after which a job is considered hung, or
 *	    MAX_SCHEDULE_TIMEOUT to disable job timeouts.
 * @name Name used for debugging
 *
 * Return 0 on success, otherwise error code.
 */
int drm_sched_init(struct drm_gpu_scheduler *sched,
		   const struct drm_sched_backend_ops *ops,
		   unsigned hw_submission,
		   unsigned hang_limit,
		   long timeout,
		   const char *name)
{
	int i;

	sched->ops = ops;
	sched->hw_submission_limit = hw_submission;
	sched->name = name;
	sched->timeout = timeout;
	sched->hang_limit = hang_limit;
	for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_MAX; i++)
		drm_sched_rq_init(&sched->sched_rq[i]);

	init_waitqueue_head(&sched->wake_up_worker);
	init_waitqueue_head(&sched->job_scheduled);
	INIT_LIST_HEAD(&sched->ring_mirror_list);
	spin_lock_init(&sched->job_list_lock);
	atomic_set(&sched->hw_rq_count, 0);
	atomic64_set(&sched->job_id_count, 0);

	/* Each scheduler will run on a separate kernel thread */
	sched->thread = kthread_run(drm_sched_main, sched, sched->name);
	if (IS_ERR(sched->thread)) {
		DRM_ERROR("Failed to create scheduler for %s.\n", name);
		return PTR_ERR(sched->thread);
	}

	return 0;
}
EXPORT_SYMBOL(drm_sched_init);
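
/*
 * Example (illustrative sketch, not part of this file): a driver provides
 * the backend callbacks and creates one scheduler per hardware ring. All
 * my_* names and limits are hypothetical:
 *
 *	static const struct drm_sched_backend_ops my_sched_ops = {
 *		.dependency = my_job_dependency,
 *		.run_job = my_job_run,
 *		.timedout_job = my_job_timedout,
 *		.free_job = my_job_free,
 *	};
 *
 *	int my_ring_init(struct my_ring *ring)
 *	{
 *		return drm_sched_init(&ring->sched, &my_sched_ops,
 *				      MY_HW_SUBMISSION_LIMIT, MY_HANG_LIMIT,
 *				      msecs_to_jiffies(MY_TIMEOUT_MS),
 *				      ring->name);
 *	}
 */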

/**
 * Destroy a gpu scheduler
 *
 * @sched The pointer to the scheduler
 */
void drm_sched_fini(struct drm_gpu_scheduler *sched)
{
	if (sched->thread)
		kthread_stop(sched->thread);
}
EXPORT_SYMBOL(drm_sched_fini);