/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <drm/drmP.h>
#include "gpu_scheduler.h"

#define CREATE_TRACE_POINTS
#include "gpu_sched_trace.h"

static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity);
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
static void amd_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);

/* Initialize a given run queue struct */
static void amd_sched_rq_init(struct amd_sched_rq *rq)
{
	spin_lock_init(&rq->lock);
	INIT_LIST_HEAD(&rq->entities);
	rq->current_entity = NULL;
}

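/**
 * Add an entity to the run queue, unless it is already queued.
 *
 * @rq		The run queue to add to.
 * @entity	The entity to add.
 */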
static void amd_sched_rq_add_entity(struct amd_sched_rq *rq,
				    struct amd_sched_entity *entity)
{
	if (!list_empty(&entity->list))
		return;
	spin_lock(&rq->lock);
	list_add_tail(&entity->list, &rq->entities);
	spin_unlock(&rq->lock);
}

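/**
 * Remove an entity from the run queue.
 *
 * @rq		The run queue to remove from.
 * @entity	The entity to remove.
 *
 * Also clears rq->current_entity if it points at the removed entity.
 */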
static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
				       struct amd_sched_entity *entity)
{
	if (list_empty(&entity->list))
		return;
	spin_lock(&rq->lock);
	list_del_init(&entity->list);
	if (rq->current_entity == entity)
		rq->current_entity = NULL;
	spin_unlock(&rq->lock);
}

/**
 * Select an entity which could provide a job to run
 *
 * @rq		The run queue to check.
 *
 * Try to find a ready entity, returns NULL if none found.
 */
static struct amd_sched_entity *
amd_sched_rq_select_entity(struct amd_sched_rq *rq)
{
	struct amd_sched_entity *entity;

	spin_lock(&rq->lock);

	entity = rq->current_entity;
	if (entity) {
		list_for_each_entry_continue(entity, &rq->entities, list) {
			if (amd_sched_entity_is_ready(entity)) {
				rq->current_entity = entity;
				spin_unlock(&rq->lock);
				return entity;
			}
		}
	}

	list_for_each_entry(entity, &rq->entities, list) {

		if (amd_sched_entity_is_ready(entity)) {
			rq->current_entity = entity;
			spin_unlock(&rq->lock);
			return entity;
		}

		if (entity == rq->current_entity)
			break;
	}

	spin_unlock(&rq->lock);

	return NULL;
}

/**
 * Init a context entity used by the scheduler when submitting to a HW ring.
 *
 * @sched	The pointer to the scheduler
 * @entity	The pointer to a valid amd_sched_entity
 * @rq		The run queue this entity belongs to
 * @jobs	The max number of jobs in the job queue
 * @guilty	Pointer to an atomic flag that is set once a job from this
 *		entity exceeds the scheduler's hang limit
 *
 * Return 0 on success, negative error code on failure.
 */
int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
			  struct amd_sched_entity *entity,
			  struct amd_sched_rq *rq,
			  uint32_t jobs, atomic_t *guilty)
{
	int r;

	if (!(sched && entity && rq))
		return -EINVAL;

	memset(entity, 0, sizeof(struct amd_sched_entity));
	INIT_LIST_HEAD(&entity->list);
	entity->rq = rq;
	entity->sched = sched;
	entity->guilty = guilty;

	spin_lock_init(&entity->rq_lock);
	spin_lock_init(&entity->queue_lock);
	r = kfifo_alloc(&entity->job_queue, jobs * sizeof(void *), GFP_KERNEL);
	if (r)
		return r;

	atomic_set(&entity->fence_seq, 0);
	entity->fence_context = dma_fence_context_alloc(2);

	return 0;
}

/**
 * Query if entity is initialized
 *
 * @sched	Pointer to scheduler instance
 * @entity	The pointer to a valid scheduler entity
 *
 * return true if entity is initialized, false otherwise
 */
static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched,
					    struct amd_sched_entity *entity)
{
	return entity->sched == sched &&
		entity->rq != NULL;
}

/**
 * Check if entity is idle
 *
 * @entity	The pointer to a valid scheduler entity
 *
 * Return true if the entity doesn't have any unscheduled jobs.
 */
static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
{
	rmb();
	if (kfifo_is_empty(&entity->job_queue))
		return true;

	return false;
}

/**
 * Check if entity is ready
 *
 * @entity	The pointer to a valid scheduler entity
 *
 * Return true if entity could provide a job.
 */
static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity)
{
	if (kfifo_is_empty(&entity->job_queue))
		return false;

	if (READ_ONCE(entity->dependency))
		return false;

	return true;
}

/**
 * Destroy a context entity
 *
 * @sched	Pointer to scheduler instance
 * @entity	The pointer to a valid scheduler entity
 *
 * Cleanup and free the allocated resources.
 */
void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
			   struct amd_sched_entity *entity)
{
	int r;

	if (!amd_sched_entity_is_initialized(sched, entity))
		return;
	/**
	 * The client will not queue more IBs during this fini, consume existing
	 * queued IBs or discard them on SIGKILL
	 */
	if ((current->flags & PF_SIGNALED) && current->exit_code == SIGKILL)
		r = -ERESTARTSYS;
	else
		r = wait_event_killable(sched->job_scheduled,
					amd_sched_entity_is_idle(entity));
	amd_sched_entity_set_rq(entity, NULL);
	if (r) {
		struct amd_sched_job *job;

		/* Park the kernel thread for a moment to make sure it isn't
		 * processing our entity.
		 */
		kthread_park(sched->thread);
		kthread_unpark(sched->thread);
		while (kfifo_out(&entity->job_queue, &job, sizeof(job))) {
			struct amd_sched_fence *s_fence = job->s_fence;

			amd_sched_fence_scheduled(s_fence);
			dma_fence_set_error(&s_fence->finished, -ESRCH);
			amd_sched_fence_finished(s_fence);
			dma_fence_put(&s_fence->finished);
			sched->ops->free_job(job);
		}
	}
	kfifo_free(&entity->job_queue);
}

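/*
 * Dependency fence callback: clear the dependency and wake up the scheduler
 * so it can check again whether this entity is ready.
 */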
static void amd_sched_entity_wakeup(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct amd_sched_entity *entity =
		container_of(cb, struct amd_sched_entity, cb);
	entity->dependency = NULL;
	dma_fence_put(f);
	amd_sched_wakeup(entity->sched);
}

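/*
 * Dependency fence callback that only clears the dependency, without waking
 * up the scheduler.
 */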
static void amd_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct amd_sched_entity *entity =
		container_of(cb, struct amd_sched_entity, cb);
	entity->dependency = NULL;
	dma_fence_put(f);
}

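/**
 * Move an entity to a different run queue, or take it out of scheduling
 * completely when @rq is NULL.
 *
 * @entity	The entity to move.
 * @rq		The new run queue, or NULL.
 */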
void amd_sched_entity_set_rq(struct amd_sched_entity *entity,
			     struct amd_sched_rq *rq)
{
	if (entity->rq == rq)
		return;

	spin_lock(&entity->rq_lock);

	if (entity->rq)
		amd_sched_rq_remove_entity(entity->rq, entity);

	entity->rq = rq;
	if (rq)
		amd_sched_rq_add_entity(rq, entity);

	spin_unlock(&entity->rq_lock);
}

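/**
 * Check whether a dependency on @fence could be optimized for @entity.
 *
 * Return true if @fence is still pending and was emitted either by the
 * entity itself or by a job on the same scheduler.
 */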
bool amd_sched_dependency_optimized(struct dma_fence *fence,
				    struct amd_sched_entity *entity)
{
	struct amd_gpu_scheduler *sched = entity->sched;
	struct amd_sched_fence *s_fence;

	if (!fence || dma_fence_is_signaled(fence))
		return false;
	if (fence->context == entity->fence_context)
		return true;
	s_fence = to_amd_sched_fence(fence);
	if (s_fence && s_fence->sched == sched)
		return true;

	return false;
}

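/*
 * Install a callback on the entity's current dependency fence.
 *
 * Return true if a callback was installed and the entity has to wait,
 * false if the dependency can be ignored (it comes from the entity itself
 * or has already been scheduled/signaled).
 */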
static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
{
	struct amd_gpu_scheduler *sched = entity->sched;
	struct dma_fence *fence = entity->dependency;
	struct amd_sched_fence *s_fence;

	if (fence->context == entity->fence_context) {
		/* We can ignore fences from ourself */
		dma_fence_put(entity->dependency);
		return false;
	}

	s_fence = to_amd_sched_fence(fence);
	if (s_fence && s_fence->sched == sched) {

		/*
		 * Fence is from the same scheduler, only need to wait for
		 * it to be scheduled
		 */
		fence = dma_fence_get(&s_fence->scheduled);
		dma_fence_put(entity->dependency);
		entity->dependency = fence;
		if (!dma_fence_add_callback(fence, &entity->cb,
					    amd_sched_entity_clear_dep))
			return true;

		/* Ignore it when it is already scheduled */
		dma_fence_put(fence);
		return false;
	}

	if (!dma_fence_add_callback(entity->dependency, &entity->cb,
				    amd_sched_entity_wakeup))
		return true;

	dma_fence_put(entity->dependency);
	return false;
}

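/*
 * Peek at the next job in the entity's queue without removing it.
 *
 * Return NULL if the queue is empty or if an unsignaled dependency was found;
 * in the latter case a callback has been installed that wakes the scheduler
 * once the dependency clears.
 */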
static struct amd_sched_job *
amd_sched_entity_peek_job(struct amd_sched_entity *entity)
{
	struct amd_gpu_scheduler *sched = entity->sched;
	struct amd_sched_job *sched_job;

	if (!kfifo_out_peek(&entity->job_queue, &sched_job, sizeof(sched_job)))
		return NULL;

	while ((entity->dependency = sched->ops->dependency(sched_job)))
		if (amd_sched_entity_add_dependency_cb(entity))
			return NULL;

	return sched_job;
}

/**
 * Helper to submit a job to the job queue
 *
 * @sched_job	The pointer to the job to submit
 *
 * Returns true if we could submit the job.
 */
static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
{
	struct amd_gpu_scheduler *sched = sched_job->sched;
	struct amd_sched_entity *entity = sched_job->s_entity;
	bool added, first = false;

	spin_lock(&entity->queue_lock);
	added = kfifo_in(&entity->job_queue, &sched_job,
			 sizeof(sched_job)) == sizeof(sched_job);

	if (added && kfifo_len(&entity->job_queue) == sizeof(sched_job))
		first = true;

	spin_unlock(&entity->queue_lock);

	/* first job wakes up scheduler */
	if (first) {
		/* Add the entity to the run queue */
		spin_lock(&entity->rq_lock);
		amd_sched_rq_add_entity(entity->rq, entity);
		spin_unlock(&entity->rq_lock);
		amd_sched_wakeup(sched);
	}
	return added;
}

/* job_finish is called after the hw fence is signaled */
static void amd_sched_job_finish(struct work_struct *work)
{
	struct amd_sched_job *s_job = container_of(work, struct amd_sched_job,
						   finish_work);
	struct amd_gpu_scheduler *sched = s_job->sched;

	/* remove job from ring_mirror_list */
	spin_lock(&sched->job_list_lock);
	list_del_init(&s_job->node);
	if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
		struct amd_sched_job *next;

		spin_unlock(&sched->job_list_lock);
		cancel_delayed_work_sync(&s_job->work_tdr);
		spin_lock(&sched->job_list_lock);

		/* queue TDR for next job */
		next = list_first_entry_or_null(&sched->ring_mirror_list,
						struct amd_sched_job, node);

		if (next)
			schedule_delayed_work(&next->work_tdr, sched->timeout);
	}
	spin_unlock(&sched->job_list_lock);
	dma_fence_put(&s_job->s_fence->finished);
	sched->ops->free_job(s_job);
}

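/* Finished fence callback: defer the actual job cleanup to a work item. */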
static void amd_sched_job_finish_cb(struct dma_fence *f,
				    struct dma_fence_cb *cb)
{
	struct amd_sched_job *job = container_of(cb, struct amd_sched_job,
						 finish_cb);
	schedule_work(&job->finish_work);
}

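/*
 * Start tracking a job that is about to run on the hardware: register the
 * finish callback, add the job to the ring mirror list and arm the timeout
 * handler if this is the first job in flight.
 */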
static void amd_sched_job_begin(struct amd_sched_job *s_job)
{
	struct amd_gpu_scheduler *sched = s_job->sched;

	dma_fence_add_callback(&s_job->s_fence->finished, &s_job->finish_cb,
			       amd_sched_job_finish_cb);

	spin_lock(&sched->job_list_lock);
	list_add_tail(&s_job->node, &sched->ring_mirror_list);
	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
	    list_first_entry_or_null(&sched->ring_mirror_list,
				     struct amd_sched_job, node) == s_job)
		schedule_delayed_work(&s_job->work_tdr, sched->timeout);
	spin_unlock(&sched->job_list_lock);
}

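/* Delayed work handler that forwards a job timeout to the backend. */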
static void amd_sched_job_timedout(struct work_struct *work)
{
	struct amd_sched_job *job = container_of(work, struct amd_sched_job,
						 work_tdr.work);

	job->sched->ops->timedout_job(job);
}

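/*
 * Increase the job's karma and mark the owning entity as guilty once the
 * scheduler's hang limit is exceeded.
 */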
static void amd_sched_set_guilty(struct amd_sched_job *s_job)
{
	if (atomic_inc_return(&s_job->karma) > s_job->sched->hang_limit)
		if (s_job->s_entity->guilty)
			atomic_set(s_job->s_entity->guilty, 1);
}

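/**
 * Stop tracking the hardware fences of all jobs in flight and, if @bad is
 * given, mark the entity that submitted it as guilty.
 *
 * @sched	The scheduler to reset.
 * @bad		The job that caused the hang, or NULL.
 */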
void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched, struct amd_sched_job *bad)
{
	struct amd_sched_job *s_job;
	struct amd_sched_entity *entity, *tmp;
	int i;

	spin_lock(&sched->job_list_lock);
	list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) {
		if (s_job->s_fence->parent &&
		    dma_fence_remove_callback(s_job->s_fence->parent,
					      &s_job->s_fence->cb)) {
			dma_fence_put(s_job->s_fence->parent);
			s_job->s_fence->parent = NULL;
			atomic_dec(&sched->hw_rq_count);
		}
	}
	spin_unlock(&sched->job_list_lock);

	if (bad) {
		bool found = false;

		for (i = AMD_SCHED_PRIORITY_MIN; i < AMD_SCHED_PRIORITY_MAX; i++) {
			struct amd_sched_rq *rq = &sched->sched_rq[i];

			spin_lock(&rq->lock);
			list_for_each_entry_safe(entity, tmp, &rq->entities, list) {
				if (bad->s_fence->scheduled.context == entity->fence_context) {
					found = true;
					amd_sched_set_guilty(bad);
					break;
				}
			}
			spin_unlock(&rq->lock);
			if (found)
				break;
		}
	}
}

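/* Remove a single job from the ring mirror list. */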
void amd_sched_job_kickout(struct amd_sched_job *s_job)
{
	struct amd_gpu_scheduler *sched = s_job->sched;

	spin_lock(&sched->job_list_lock);
	list_del_init(&s_job->node);
	spin_unlock(&sched->job_list_lock);
}

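/**
 * Resubmit all jobs on the ring mirror list after a hardware reset and
 * re-arm the timeout handler for the first job in flight.
 *
 * @sched	The scheduler to recover.
 */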
void amd_sched_job_recovery(struct amd_gpu_scheduler *sched)
{
	struct amd_sched_job *s_job, *tmp;
	int r;

	spin_lock(&sched->job_list_lock);
	s_job = list_first_entry_or_null(&sched->ring_mirror_list,
					 struct amd_sched_job, node);
	if (s_job && sched->timeout != MAX_SCHEDULE_TIMEOUT)
		schedule_delayed_work(&s_job->work_tdr, sched->timeout);

	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
		struct amd_sched_fence *s_fence = s_job->s_fence;
		struct dma_fence *fence;

		spin_unlock(&sched->job_list_lock);
		fence = sched->ops->run_job(s_job);
		atomic_inc(&sched->hw_rq_count);
		if (fence) {
			s_fence->parent = dma_fence_get(fence);
			r = dma_fence_add_callback(fence, &s_fence->cb,
						   amd_sched_process_job);
			if (r == -ENOENT)
				amd_sched_process_job(fence, &s_fence->cb);
			else if (r)
				DRM_ERROR("fence add callback failed (%d)\n",
					  r);
			dma_fence_put(fence);
		} else {
			DRM_ERROR("Failed to run job!\n");
			amd_sched_process_job(NULL, &s_fence->cb);
		}
		spin_lock(&sched->job_list_lock);
	}
	spin_unlock(&sched->job_list_lock);
}

/**
 * Submit a job to the job queue
 *
 * @sched_job	The pointer to the job to submit
 *
 * Blocks until the entity has accepted the job into its job queue.
 */
void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
{
	struct amd_sched_entity *entity = sched_job->s_entity;

	trace_amd_sched_job(sched_job);
	wait_event(entity->sched->job_scheduled,
		   amd_sched_entity_in(sched_job));
}

/* init a sched_job with the basic fields */
int amd_sched_job_init(struct amd_sched_job *job,
		       struct amd_gpu_scheduler *sched,
		       struct amd_sched_entity *entity,
		       void *owner)
{
	job->sched = sched;
	job->s_entity = entity;
	job->s_priority = entity->rq - sched->sched_rq;
	job->s_fence = amd_sched_fence_create(entity, owner);
	if (!job->s_fence)
		return -ENOMEM;
	job->id = atomic64_inc_return(&sched->job_id_count);

	INIT_WORK(&job->finish_work, amd_sched_job_finish);
	INIT_LIST_HEAD(&job->node);
	INIT_DELAYED_WORK(&job->work_tdr, amd_sched_job_timedout);

	return 0;
}

/**
 * Return true if we can push more jobs to the hw.
 */
static bool amd_sched_ready(struct amd_gpu_scheduler *sched)
{
	return atomic_read(&sched->hw_rq_count) <
		sched->hw_submission_limit;
}

/**
 * Wake up the scheduler when it is ready
 */
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched)
{
	if (amd_sched_ready(sched))
		wake_up_interruptible(&sched->wake_up_worker);
}

/**
 * Select next entity to process
 */
static struct amd_sched_entity *
amd_sched_select_entity(struct amd_gpu_scheduler *sched)
{
	struct amd_sched_entity *entity;
	int i;

	if (!amd_sched_ready(sched))
		return NULL;

	/* Kernel run queue has higher priority than normal run queue */
	for (i = AMD_SCHED_PRIORITY_MAX - 1; i >= AMD_SCHED_PRIORITY_MIN; i--) {
		entity = amd_sched_rq_select_entity(&sched->sched_rq[i]);
		if (entity)
			break;
	}

	return entity;
}

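/*
 * Hardware fence callback: drop the hardware run queue count, signal the
 * scheduler's finished fence and wake up the worker thread.
 */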
static void amd_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct amd_sched_fence *s_fence =
		container_of(cb, struct amd_sched_fence, cb);
	struct amd_gpu_scheduler *sched = s_fence->sched;

	dma_fence_get(&s_fence->finished);
	atomic_dec(&sched->hw_rq_count);
	amd_sched_fence_finished(s_fence);

	trace_amd_sched_process_job(s_fence);
	dma_fence_put(&s_fence->finished);
	wake_up_interruptible(&sched->wake_up_worker);
}

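/* Honor a pending kthread_park() request; return true if we were parked. */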
static bool amd_sched_blocked(struct amd_gpu_scheduler *sched)
{
	if (kthread_should_park()) {
		kthread_parkme();
		return true;
	}

	return false;
}

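/*
 * Main scheduler thread: pick a ready entity, hand its next job to the
 * hardware and track the resulting fence.
 */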
static int amd_sched_main(void *param)
{
	struct sched_param sparam = {.sched_priority = 1};
	struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;
	int r, count;

	sched_setscheduler(current, SCHED_FIFO, &sparam);

	while (!kthread_should_stop()) {
		struct amd_sched_entity *entity = NULL;
		struct amd_sched_fence *s_fence;
		struct amd_sched_job *sched_job;
		struct dma_fence *fence;

		wait_event_interruptible(sched->wake_up_worker,
					 (!amd_sched_blocked(sched) &&
					  (entity = amd_sched_select_entity(sched))) ||
					 kthread_should_stop());

		if (!entity)
			continue;

		sched_job = amd_sched_entity_peek_job(entity);
		if (!sched_job)
			continue;

		s_fence = sched_job->s_fence;

		atomic_inc(&sched->hw_rq_count);
		amd_sched_job_begin(sched_job);

		fence = sched->ops->run_job(sched_job);
		amd_sched_fence_scheduled(s_fence);

		if (fence) {
			s_fence->parent = dma_fence_get(fence);
			r = dma_fence_add_callback(fence, &s_fence->cb,
						   amd_sched_process_job);
			if (r == -ENOENT)
				amd_sched_process_job(fence, &s_fence->cb);
			else if (r)
				DRM_ERROR("fence add callback failed (%d)\n",
					  r);
			dma_fence_put(fence);
		} else {
			DRM_ERROR("Failed to run job!\n");
			amd_sched_process_job(NULL, &s_fence->cb);
		}

		count = kfifo_out(&entity->job_queue, &sched_job,
				  sizeof(sched_job));
		WARN_ON(count != sizeof(sched_job));
		wake_up(&sched->job_scheduled);
	}
	return 0;
}

/**
 * Init a gpu scheduler instance
 *
 * @sched		The pointer to the scheduler
 * @ops			The backend operations for this scheduler.
 * @hw_submission	Number of hw submissions that can be in flight.
 * @hang_limit		Number of timeouts a job may cause before its entity
 *			is marked guilty.
 * @timeout		Timeout in jiffies for a job, or MAX_SCHEDULE_TIMEOUT
 *			to disable the timeout handler.
 * @name		Name used for debugging
 *
 * Return 0 on success, otherwise error code.
 */
int amd_sched_init(struct amd_gpu_scheduler *sched,
		   const struct amd_sched_backend_ops *ops,
		   unsigned hw_submission,
		   unsigned hang_limit,
		   long timeout,
		   const char *name)
{
	int i;

	sched->ops = ops;
	sched->hw_submission_limit = hw_submission;
	sched->name = name;
	sched->timeout = timeout;
	sched->hang_limit = hang_limit;
	for (i = AMD_SCHED_PRIORITY_MIN; i < AMD_SCHED_PRIORITY_MAX; i++)
		amd_sched_rq_init(&sched->sched_rq[i]);

	init_waitqueue_head(&sched->wake_up_worker);
	init_waitqueue_head(&sched->job_scheduled);
	INIT_LIST_HEAD(&sched->ring_mirror_list);
	spin_lock_init(&sched->job_list_lock);
	atomic_set(&sched->hw_rq_count, 0);
	atomic64_set(&sched->job_id_count, 0);

	/* Each scheduler will run on a separate kernel thread */
	sched->thread = kthread_run(amd_sched_main, sched, sched->name);
	if (IS_ERR(sched->thread)) {
		DRM_ERROR("Failed to create scheduler for %s.\n", name);
		return PTR_ERR(sched->thread);
	}

	return 0;
}

/**
 * Destroy a gpu scheduler
 *
 * @sched	The pointer to the scheduler
 */
void amd_sched_fini(struct amd_gpu_scheduler *sched)
{
	if (sched->thread)
		kthread_stop(sched->thread);
}