/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <drm/drmP.h>
#include "gpu_scheduler.h"

#define CREATE_TRACE_POINTS
#include "gpu_sched_trace.h"

static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity);
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
static void amd_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);

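/*
 * Slab cache for amd_sched_fence objects, shared by all scheduler instances.
 * The reference count below lets the last amd_sched_fini() call destroy the
 * cache again.
 */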
struct kmem_cache *sched_fence_slab;
atomic_t sched_fence_slab_ref = ATOMIC_INIT(0);

/* Initialize a given run queue struct */
static void amd_sched_rq_init(struct amd_sched_rq *rq)
{
        spin_lock_init(&rq->lock);
        INIT_LIST_HEAD(&rq->entities);
        rq->current_entity = NULL;
}

static void amd_sched_rq_add_entity(struct amd_sched_rq *rq,
                                    struct amd_sched_entity *entity)
{
        if (!list_empty(&entity->list))
                return;
        spin_lock(&rq->lock);
        list_add_tail(&entity->list, &rq->entities);
        spin_unlock(&rq->lock);
}

static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
                                       struct amd_sched_entity *entity)
{
        if (list_empty(&entity->list))
                return;
        spin_lock(&rq->lock);
        list_del_init(&entity->list);
        if (rq->current_entity == entity)
                rq->current_entity = NULL;
        spin_unlock(&rq->lock);
}

/**
 * Select an entity which could provide a job to run
 *
 * @rq          The run queue to check.
 *
 * Try to find a ready entity, returns NULL if none found.
 */
static struct amd_sched_entity *
amd_sched_rq_select_entity(struct amd_sched_rq *rq)
{
        struct amd_sched_entity *entity;

        spin_lock(&rq->lock);

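        /*
         * Round robin: continue the scan right after the entity that was
         * picked last time so every ready entity gets its turn before the
         * same one is selected again.
         */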
        entity = rq->current_entity;
        if (entity) {
                list_for_each_entry_continue(entity, &rq->entities, list) {
                        if (amd_sched_entity_is_ready(entity)) {
                                rq->current_entity = entity;
                                spin_unlock(&rq->lock);
                                return entity;
                        }
                }
        }

        list_for_each_entry(entity, &rq->entities, list) {

                if (amd_sched_entity_is_ready(entity)) {
                        rq->current_entity = entity;
                        spin_unlock(&rq->lock);
                        return entity;
                }

                if (entity == rq->current_entity)
                        break;
        }

        spin_unlock(&rq->lock);

        return NULL;
}

/**
 * Init a context entity used by the scheduler when submitting to a HW ring.
 *
 * @sched       The pointer to the scheduler
 * @entity      The pointer to a valid amd_sched_entity
 * @rq          The run queue this entity belongs to
 * @jobs        The max number of jobs in the job queue
 *
 * Return 0 on success, negative error code on failure.
 */
int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
                          struct amd_sched_entity *entity,
                          struct amd_sched_rq *rq,
                          uint32_t jobs)
{
        int r;

        if (!(sched && entity && rq))
                return -EINVAL;

        memset(entity, 0, sizeof(struct amd_sched_entity));
        INIT_LIST_HEAD(&entity->list);
        entity->rq = rq;
        entity->sched = sched;

        spin_lock_init(&entity->queue_lock);
        r = kfifo_alloc(&entity->job_queue, jobs * sizeof(void *), GFP_KERNEL);
        if (r)
                return r;

        atomic_set(&entity->fence_seq, 0);
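        /*
         * Two fence contexts are allocated up front: amd_sched_fence_create()
         * uses them for the per-job "scheduled" and "finished" fences of this
         * entity.
         */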
        entity->fence_context = dma_fence_context_alloc(2);

        return 0;
}

/**
 * Query if entity is initialized
 *
 * @sched       Pointer to scheduler instance
 * @entity      The pointer to a valid scheduler entity
 *
 * Return true if entity is initialized, false otherwise.
 */
static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched,
                                            struct amd_sched_entity *entity)
{
        return entity->sched == sched &&
                entity->rq != NULL;
}

/**
 * Check if entity is idle
 *
 * @entity      The pointer to a valid scheduler entity
 *
 * Return true if the entity does not have any unscheduled jobs.
 */
static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
{
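        /* make sure we see the latest state of the job queue */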
        rmb();
        if (kfifo_is_empty(&entity->job_queue))
                return true;

        return false;
}

/**
 * Check if entity is ready
 *
 * @entity      The pointer to a valid scheduler entity
 *
 * Return true if entity could provide a job.
 */
static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity)
{
        if (kfifo_is_empty(&entity->job_queue))
                return false;

        if (ACCESS_ONCE(entity->dependency))
                return false;

        return true;
}

/**
 * Destroy a context entity
 *
 * @sched       Pointer to scheduler instance
 * @entity      The pointer to a valid scheduler entity
 *
 * Cleanup and free the allocated resources.
 */
void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
                           struct amd_sched_entity *entity)
{
        struct amd_sched_rq *rq = entity->rq;

        if (!amd_sched_entity_is_initialized(sched, entity))
                return;

        /*
         * The client will not queue more IBs during this fini; consume the
         * existing queued IBs.
         */
        wait_event(sched->job_scheduled, amd_sched_entity_is_idle(entity));

        amd_sched_rq_remove_entity(rq, entity);
        kfifo_free(&entity->job_queue);
}

static void amd_sched_entity_wakeup(struct dma_fence *f, struct dma_fence_cb *cb)
{
        struct amd_sched_entity *entity =
                container_of(cb, struct amd_sched_entity, cb);
        entity->dependency = NULL;
        dma_fence_put(f);
        amd_sched_wakeup(entity->sched);
}

static void amd_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb *cb)
{
        struct amd_sched_entity *entity =
                container_of(cb, struct amd_sched_entity, cb);
        entity->dependency = NULL;
        dma_fence_put(f);
}

static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
{
        struct amd_gpu_scheduler *sched = entity->sched;
        struct dma_fence *fence = entity->dependency;
        struct amd_sched_fence *s_fence;

        if (fence->context == entity->fence_context) {
                /* We can ignore fences from ourselves */
                dma_fence_put(entity->dependency);
                return false;
        }

        s_fence = to_amd_sched_fence(fence);
        if (s_fence && s_fence->sched == sched) {

                /*
                 * Fence is from the same scheduler, only need to wait for
                 * it to be scheduled
                 */
                fence = dma_fence_get(&s_fence->scheduled);
                dma_fence_put(entity->dependency);
                entity->dependency = fence;
                if (!dma_fence_add_callback(fence, &entity->cb,
                                            amd_sched_entity_clear_dep))
                        return true;

                /* Ignore it when it is already scheduled */
                dma_fence_put(fence);
                return false;
        }

        if (!dma_fence_add_callback(entity->dependency, &entity->cb,
                                    amd_sched_entity_wakeup))
                return true;

        dma_fence_put(entity->dependency);
        return false;
}

static struct amd_sched_job *
amd_sched_entity_pop_job(struct amd_sched_entity *entity)
{
        struct amd_gpu_scheduler *sched = entity->sched;
        struct amd_sched_job *sched_job;

        if (!kfifo_out_peek(&entity->job_queue, &sched_job, sizeof(sched_job)))
                return NULL;

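        /*
         * Resolve all dependencies of the job. If installing a callback on
         * one of them succeeded, the dependency is still pending; bail out,
         * the job stays at the head of the queue and will be picked up again
         * once the dependency clears.
         */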
        while ((entity->dependency = sched->ops->dependency(sched_job)))
                if (amd_sched_entity_add_dependency_cb(entity))
                        return NULL;

        return sched_job;
}

/**
 * Helper to submit a job to the job queue
 *
 * @sched_job   The pointer to the job to submit
 *
 * Returns true if we could submit the job.
 */
static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
{
        struct amd_gpu_scheduler *sched = sched_job->sched;
        struct amd_sched_entity *entity = sched_job->s_entity;
        bool added, first = false;

        spin_lock(&entity->queue_lock);
        added = kfifo_in(&entity->job_queue, &sched_job,
                         sizeof(sched_job)) == sizeof(sched_job);

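        /*
         * If the fifo now holds exactly one job's worth of data this was the
         * first entry, so the entity has to be (re)added to its run queue and
         * the scheduler woken up.
         */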
        if (added && kfifo_len(&entity->job_queue) == sizeof(sched_job))
                first = true;

        spin_unlock(&entity->queue_lock);

        /* first job wakes up scheduler */
        if (first) {
                /* Add the entity to the run queue */
                amd_sched_rq_add_entity(entity->rq, entity);
                amd_sched_wakeup(sched);
        }
        return added;
}

/* amd_sched_job_finish is run from a work item once the job's finished fence
 * has signaled; it removes the job from the ring_mirror_list, re-arms the TDR
 * timer for the next job and frees the job.
 */
static void amd_sched_job_finish(struct work_struct *work)
{
        struct amd_sched_job *s_job = container_of(work, struct amd_sched_job,
                                                   finish_work);
        struct amd_gpu_scheduler *sched = s_job->sched;

        /* remove job from ring_mirror_list */
        spin_lock(&sched->job_list_lock);
        list_del_init(&s_job->node);
        if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
                struct amd_sched_job *next;

                spin_unlock(&sched->job_list_lock);
                cancel_delayed_work_sync(&s_job->work_tdr);
                spin_lock(&sched->job_list_lock);

                /* queue TDR for next job */
                next = list_first_entry_or_null(&sched->ring_mirror_list,
                                                struct amd_sched_job, node);

                if (next)
                        schedule_delayed_work(&next->work_tdr, sched->timeout);
        }
        spin_unlock(&sched->job_list_lock);
        sched->ops->free_job(s_job);
}

static void amd_sched_job_finish_cb(struct dma_fence *f,
                                    struct dma_fence_cb *cb)
{
        struct amd_sched_job *job = container_of(cb, struct amd_sched_job,
                                                 finish_cb);
        schedule_work(&job->finish_work);
}

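/* Track the job on the ring_mirror_list and arm the TDR timer if it is the
 * first job in flight on this scheduler.
 */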
static void amd_sched_job_begin(struct amd_sched_job *s_job)
{
        struct amd_gpu_scheduler *sched = s_job->sched;

        spin_lock(&sched->job_list_lock);
        list_add_tail(&s_job->node, &sched->ring_mirror_list);
        if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
            list_first_entry_or_null(&sched->ring_mirror_list,
                                     struct amd_sched_job, node) == s_job)
                schedule_delayed_work(&s_job->work_tdr, sched->timeout);
        spin_unlock(&sched->job_list_lock);
}

static void amd_sched_job_timedout(struct work_struct *work)
{
        struct amd_sched_job *job = container_of(work, struct amd_sched_job,
                                                 work_tdr.work);

        job->sched->ops->timedout_job(job);
}

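/* Detach the hardware fence callbacks of all jobs on the ring_mirror_list,
 * e.g. before a GPU reset, so that amd_sched_job_recovery() can resubmit the
 * jobs afterwards.
 */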
void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched)
{
        struct amd_sched_job *s_job;

        spin_lock(&sched->job_list_lock);
        list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) {
                if (dma_fence_remove_callback(s_job->s_fence->parent,
                                              &s_job->s_fence->cb)) {
                        dma_fence_put(s_job->s_fence->parent);
                        s_job->s_fence->parent = NULL;
                }
        }
        atomic_set(&sched->hw_rq_count, 0);
        spin_unlock(&sched->job_list_lock);
}

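/* Resubmit all jobs still on the ring_mirror_list, e.g. after
 * amd_sched_hw_job_reset() and a successful GPU reset, and restart the TDR
 * timer for the first of them.
 */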
void amd_sched_job_recovery(struct amd_gpu_scheduler *sched)
{
        struct amd_sched_job *s_job, *tmp;
        int r;

        spin_lock(&sched->job_list_lock);
        s_job = list_first_entry_or_null(&sched->ring_mirror_list,
                                         struct amd_sched_job, node);
        if (s_job && sched->timeout != MAX_SCHEDULE_TIMEOUT)
                schedule_delayed_work(&s_job->work_tdr, sched->timeout);

        list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
                struct amd_sched_fence *s_fence = s_job->s_fence;
                struct dma_fence *fence;

                spin_unlock(&sched->job_list_lock);
                fence = sched->ops->run_job(s_job);
                atomic_inc(&sched->hw_rq_count);
                if (fence) {
                        s_fence->parent = dma_fence_get(fence);
                        r = dma_fence_add_callback(fence, &s_fence->cb,
                                                   amd_sched_process_job);
                        if (r == -ENOENT)
                                amd_sched_process_job(fence, &s_fence->cb);
                        else if (r)
                                DRM_ERROR("fence add callback failed (%d)\n",
                                          r);
                        dma_fence_put(fence);
                } else {
                        DRM_ERROR("Failed to run job!\n");
                        amd_sched_process_job(NULL, &s_fence->cb);
                }
                spin_lock(&sched->job_list_lock);
        }
        spin_unlock(&sched->job_list_lock);
}

/**
 * Submit a job to the job queue
 *
 * @sched_job   The pointer to the job to submit
 *
 * Waits until there is room in the entity's job queue and then adds the job
 * to it.
 */
void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
{
        struct amd_sched_entity *entity = sched_job->s_entity;

        trace_amd_sched_job(sched_job);
        dma_fence_add_callback(&sched_job->s_fence->finished,
                               &sched_job->finish_cb,
                               amd_sched_job_finish_cb);
        wait_event(entity->sched->job_scheduled,
                   amd_sched_entity_in(sched_job));
}

/* init a sched_job with basic fields */
int amd_sched_job_init(struct amd_sched_job *job,
                       struct amd_gpu_scheduler *sched,
                       struct amd_sched_entity *entity,
                       void *owner)
{
        job->sched = sched;
        job->s_entity = entity;
        job->s_fence = amd_sched_fence_create(entity, owner);
        if (!job->s_fence)
                return -ENOMEM;

        INIT_WORK(&job->finish_work, amd_sched_job_finish);
        INIT_LIST_HEAD(&job->node);
        INIT_DELAYED_WORK(&job->work_tdr, amd_sched_job_timedout);

        return 0;
}

/**
 * Return true if we can push more jobs to the hw.
 */
static bool amd_sched_ready(struct amd_gpu_scheduler *sched)
{
        return atomic_read(&sched->hw_rq_count) <
                sched->hw_submission_limit;
}

/**
 * Wake up the scheduler when it is ready
 */
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched)
{
        if (amd_sched_ready(sched))
                wake_up_interruptible(&sched->wake_up_worker);
}

/**
 * Select next entity to process
 */
static struct amd_sched_entity *
amd_sched_select_entity(struct amd_gpu_scheduler *sched)
{
        struct amd_sched_entity *entity;
        int i;

        if (!amd_sched_ready(sched))
                return NULL;

        /* Kernel run queue has higher priority than normal run queue */
        for (i = 0; i < AMD_SCHED_MAX_PRIORITY; i++) {
                entity = amd_sched_rq_select_entity(&sched->sched_rq[i]);
                if (entity)
                        break;
        }

        return entity;
}

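/* Callback for the hardware fence of a job: drop the job from the hardware
 * run count, signal the corresponding finished fence and wake the scheduler
 * thread up again.
 */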
static void amd_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
{
        struct amd_sched_fence *s_fence =
                container_of(cb, struct amd_sched_fence, cb);
        struct amd_gpu_scheduler *sched = s_fence->sched;

        atomic_dec(&sched->hw_rq_count);
        amd_sched_fence_finished(s_fence);

        trace_amd_sched_process_job(s_fence);
        dma_fence_put(&s_fence->finished);
        wake_up_interruptible(&sched->wake_up_worker);
}

static bool amd_sched_blocked(struct amd_gpu_scheduler *sched)
{
        if (kthread_should_park()) {
                kthread_parkme();
                return true;
        }

        return false;
}

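/* Main scheduler thread: wait for a ready entity, pop the next job from it,
 * hand the job to the backend with run_job() and install
 * amd_sched_process_job() as callback on the returned hardware fence.
 */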
static int amd_sched_main(void *param)
{
        struct sched_param sparam = {.sched_priority = 1};
        struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;
        int r, count;

        sched_setscheduler(current, SCHED_FIFO, &sparam);

        while (!kthread_should_stop()) {
                struct amd_sched_entity *entity = NULL;
                struct amd_sched_fence *s_fence;
                struct amd_sched_job *sched_job;
                struct dma_fence *fence;

                wait_event_interruptible(sched->wake_up_worker,
                                         (!amd_sched_blocked(sched) &&
                                          (entity = amd_sched_select_entity(sched))) ||
                                         kthread_should_stop());

                if (!entity)
                        continue;

                sched_job = amd_sched_entity_pop_job(entity);
                if (!sched_job)
                        continue;

                s_fence = sched_job->s_fence;

                atomic_inc(&sched->hw_rq_count);
                amd_sched_job_begin(sched_job);

                fence = sched->ops->run_job(sched_job);
                amd_sched_fence_scheduled(s_fence);
                if (fence) {
                        s_fence->parent = dma_fence_get(fence);
                        r = dma_fence_add_callback(fence, &s_fence->cb,
                                                   amd_sched_process_job);
                        if (r == -ENOENT)
                                amd_sched_process_job(fence, &s_fence->cb);
                        else if (r)
                                DRM_ERROR("fence add callback failed (%d)\n",
                                          r);
                        dma_fence_put(fence);
                } else {
                        DRM_ERROR("Failed to run job!\n");
                        amd_sched_process_job(NULL, &s_fence->cb);
                }

                count = kfifo_out(&entity->job_queue, &sched_job,
                                  sizeof(sched_job));
                WARN_ON(count != sizeof(sched_job));
                wake_up(&sched->job_scheduled);
        }
        return 0;
}

/**
 * Init a gpu scheduler instance
 *
 * @sched               The pointer to the scheduler
 * @ops                 The backend operations for this scheduler.
 * @hw_submission       Number of hw submissions that can be in flight.
 * @timeout             Timeout in jiffies before a job is considered hung.
 * @name                Name used for debugging
 *
 * Return 0 on success, otherwise error code.
 */
int amd_sched_init(struct amd_gpu_scheduler *sched,
                   const struct amd_sched_backend_ops *ops,
                   unsigned hw_submission, long timeout, const char *name)
{
        int i;

        sched->ops = ops;
        sched->hw_submission_limit = hw_submission;
        sched->name = name;
        sched->timeout = timeout;
        for (i = 0; i < AMD_SCHED_MAX_PRIORITY; i++)
                amd_sched_rq_init(&sched->sched_rq[i]);

        init_waitqueue_head(&sched->wake_up_worker);
        init_waitqueue_head(&sched->job_scheduled);
        INIT_LIST_HEAD(&sched->ring_mirror_list);
        spin_lock_init(&sched->job_list_lock);
        atomic_set(&sched->hw_rq_count, 0);
        if (atomic_inc_return(&sched_fence_slab_ref) == 1) {
                sched_fence_slab = kmem_cache_create(
                        "amd_sched_fence", sizeof(struct amd_sched_fence), 0,
                        SLAB_HWCACHE_ALIGN, NULL);
                if (!sched_fence_slab)
                        return -ENOMEM;
        }

        /* Each scheduler will run on a separate kernel thread */
        sched->thread = kthread_run(amd_sched_main, sched, sched->name);
        if (IS_ERR(sched->thread)) {
                DRM_ERROR("Failed to create scheduler for %s.\n", name);
                return PTR_ERR(sched->thread);
        }

        return 0;
}

/**
 * Destroy a gpu scheduler
 *
 * @sched       The pointer to the scheduler
 */
void amd_sched_fini(struct amd_gpu_scheduler *sched)
{
        if (sched->thread)
                kthread_stop(sched->thread);
        if (atomic_dec_and_test(&sched_fence_slab_ref))
                kmem_cache_destroy(sched_fence_slab);
}