/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <drm/drmP.h>
#include "gpu_scheduler.h"

#define CREATE_TRACE_POINTS
#include "gpu_sched_trace.h"

static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity);
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);

struct kmem_cache *sched_fence_slab;
atomic_t sched_fence_slab_ref = ATOMIC_INIT(0);

/* Initialize a given run queue struct */
static void amd_sched_rq_init(struct amd_sched_rq *rq)
{
        spin_lock_init(&rq->lock);
        INIT_LIST_HEAD(&rq->entities);
        rq->current_entity = NULL;
}

static void amd_sched_rq_add_entity(struct amd_sched_rq *rq,
                                    struct amd_sched_entity *entity)
{
        if (!list_empty(&entity->list))
                return;
        spin_lock(&rq->lock);
        list_add_tail(&entity->list, &rq->entities);
        spin_unlock(&rq->lock);
}

static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
                                       struct amd_sched_entity *entity)
{
        if (list_empty(&entity->list))
                return;
        spin_lock(&rq->lock);
        list_del_init(&entity->list);
        if (rq->current_entity == entity)
                rq->current_entity = NULL;
        spin_unlock(&rq->lock);
}

/**
 * Select an entity which could provide a job to run
 *
 * @rq		The run queue to check.
 *
 * Try to find a ready entity, returns NULL if none found.
 */
static struct amd_sched_entity *
amd_sched_rq_select_entity(struct amd_sched_rq *rq)
{
        struct amd_sched_entity *entity;

        spin_lock(&rq->lock);

        entity = rq->current_entity;
        if (entity) {
                list_for_each_entry_continue(entity, &rq->entities, list) {
                        if (amd_sched_entity_is_ready(entity)) {
                                rq->current_entity = entity;
                                spin_unlock(&rq->lock);
                                return entity;
                        }
                }
        }

        list_for_each_entry(entity, &rq->entities, list) {

                if (amd_sched_entity_is_ready(entity)) {
                        rq->current_entity = entity;
                        spin_unlock(&rq->lock);
                        return entity;
                }

                if (entity == rq->current_entity)
                        break;
        }

        spin_unlock(&rq->lock);

        return NULL;
}
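
/*
 * Editor's illustration (not driver code): the walk above is a round
 * robin that resumes after rq->current_entity.  With entities
 * A -> B -> C on the queue and current_entity == B, the first loop
 * probes C, the second wraps around to A and finally re-checks B
 * before giving up, so each entity is considered exactly once per
 * selection attempt.
 */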

/**
 * Init a context entity used by the scheduler when submitting to a HW ring.
 *
 * @sched	The pointer to the scheduler
 * @entity	The pointer to a valid amd_sched_entity
 * @rq		The run queue this entity belongs to
 * @jobs	The max number of jobs in the job queue
 *
 * Returns 0 on success, negative error code on failure.
 */
int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
                          struct amd_sched_entity *entity,
                          struct amd_sched_rq *rq,
                          uint32_t jobs)
{
        int r;

        if (!(sched && entity && rq))
                return -EINVAL;

        memset(entity, 0, sizeof(struct amd_sched_entity));
        INIT_LIST_HEAD(&entity->list);
        entity->rq = rq;
        entity->sched = sched;

        spin_lock_init(&entity->queue_lock);
        r = kfifo_alloc(&entity->job_queue, jobs * sizeof(void *), GFP_KERNEL);
        if (r)
                return r;

        atomic_set(&entity->fence_seq, 0);
        entity->fence_context = fence_context_alloc(1);

        return 0;
}
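
/*
 * Usage sketch (hypothetical caller, for illustration only; "ring",
 * "ctx" and "num_jobs" are assumed names, not part of this file):
 *
 *	r = amd_sched_entity_init(&ring->sched, &ctx->entity,
 *				  &ring->sched.sched_rq[0], num_jobs);
 *	if (r)
 *		return r;
 *	...
 *	amd_sched_entity_fini(&ring->sched, &ctx->entity);
 */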

/**
 * Query if entity is initialized
 *
 * @sched	Pointer to scheduler instance
 * @entity	The pointer to a valid scheduler entity
 *
 * return true if entity is initialized, false otherwise
 */
static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched,
                                            struct amd_sched_entity *entity)
{
        return entity->sched == sched &&
                entity->rq != NULL;
}

/**
 * Check if entity is idle
 *
 * @entity	The pointer to a valid scheduler entity
 *
 * Returns true if the entity has no unscheduled jobs left.
 */
static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
{
        rmb();
        if (kfifo_is_empty(&entity->job_queue))
                return true;

        return false;
}

/**
 * Check if entity is ready
 *
 * @entity	The pointer to a valid scheduler entity
 *
 * Return true if entity could provide a job.
 */
static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity)
{
        if (kfifo_is_empty(&entity->job_queue))
                return false;

        if (ACCESS_ONCE(entity->dependency))
                return false;

        return true;
}

/**
 * Destroy a context entity
 *
 * @sched	Pointer to scheduler instance
 * @entity	The pointer to a valid scheduler entity
 *
 * Cleanup and free the allocated resources.
 */
void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
                           struct amd_sched_entity *entity)
{
        struct amd_sched_rq *rq = entity->rq;

        if (!amd_sched_entity_is_initialized(sched, entity))
                return;

        /**
         * The client will not queue more IBs during this fini; consume
         * the existing queued IBs.
         */
        wait_event(sched->job_scheduled, amd_sched_entity_is_idle(entity));

        amd_sched_rq_remove_entity(rq, entity);
        kfifo_free(&entity->job_queue);
}

static void amd_sched_entity_wakeup(struct fence *f, struct fence_cb *cb)
{
        struct amd_sched_entity *entity =
                container_of(cb, struct amd_sched_entity, cb);
        entity->dependency = NULL;
        fence_put(f);
        amd_sched_wakeup(entity->sched);
}

static void amd_sched_entity_clear_dep(struct fence *f, struct fence_cb *cb)
{
        struct amd_sched_entity *entity =
                container_of(cb, struct amd_sched_entity, cb);
        entity->dependency = NULL;
        fence_put(f);
}

static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
{
        struct amd_gpu_scheduler *sched = entity->sched;
        struct fence *fence = entity->dependency;
        struct amd_sched_fence *s_fence;

        if (fence->context == entity->fence_context) {
                /* We can ignore fences from ourself */
                fence_put(entity->dependency);
                return false;
        }

        s_fence = to_amd_sched_fence(fence);
        if (s_fence && s_fence->sched == sched) {
                /* Fence is from the same scheduler */
                if (test_bit(AMD_SCHED_FENCE_SCHEDULED_BIT, &fence->flags)) {
                        /* Ignore it when it is already scheduled */
                        fence_put(entity->dependency);
                        return false;
                }

                /* Wait for fence to be scheduled */
                entity->cb.func = amd_sched_entity_clear_dep;
                list_add_tail(&entity->cb.node, &s_fence->scheduled_cb);
                return true;
        }

        if (!fence_add_callback(entity->dependency, &entity->cb,
                                amd_sched_entity_wakeup))
                return true;

        fence_put(entity->dependency);
        return false;
}

static struct amd_sched_job *
amd_sched_entity_pop_job(struct amd_sched_entity *entity)
{
        struct amd_gpu_scheduler *sched = entity->sched;
        struct amd_sched_job *sched_job;

        if (!kfifo_out_peek(&entity->job_queue, &sched_job, sizeof(sched_job)))
                return NULL;

        while ((entity->dependency = sched->ops->dependency(sched_job)))
                if (amd_sched_entity_add_dependency_cb(entity))
                        return NULL;

        return sched_job;
}

/**
 * Helper to submit a job to the job queue
 *
 * @sched_job		The job to submit
 *
 * Returns true if we could submit the job.
 */
static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
{
        struct amd_gpu_scheduler *sched = sched_job->sched;
        struct amd_sched_entity *entity = sched_job->s_entity;
        bool added, first = false;

        spin_lock(&entity->queue_lock);
        added = kfifo_in(&entity->job_queue, &sched_job,
                         sizeof(sched_job)) == sizeof(sched_job);

        if (added && kfifo_len(&entity->job_queue) == sizeof(sched_job))
                first = true;

        spin_unlock(&entity->queue_lock);

        /* first job wakes up scheduler */
        if (first) {
                /* Add the entity to the run queue */
                amd_sched_rq_add_entity(entity->rq, entity);
                amd_sched_wakeup(sched);
        }
        return added;
}

static void amd_sched_free_job(struct fence *f, struct fence_cb *cb)
{
        struct amd_sched_job *job = container_of(cb, struct amd_sched_job,
                                                 cb_free_job);

        schedule_work(&job->work_free_job);
}

/* amd_sched_job_finish is called after the hw fence has signaled; it
 * removes the job from the ring mirror list and re-arms the timeout
 * work for the next pending job.
 */
static void amd_sched_job_finish(struct amd_sched_job *s_job)
{
        struct amd_sched_job *next;
        struct amd_gpu_scheduler *sched = s_job->sched;
        unsigned long flags;

        /* remove job from ring_mirror_list */
        spin_lock_irqsave(&sched->job_list_lock, flags);
        list_del_init(&s_job->node);
        if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
                if (cancel_delayed_work(&s_job->work_tdr))
                        amd_sched_job_put(s_job);

                /* queue TDR for next job */
                next = list_first_entry_or_null(&sched->ring_mirror_list,
                                                struct amd_sched_job, node);

                if (next) {
                        amd_sched_job_get(next);
                        schedule_delayed_work(&next->work_tdr, sched->timeout);
                }
        }
        spin_unlock_irqrestore(&sched->job_list_lock, flags);
}
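
/*
 * Editor's note on the TDR life cycle: with a 2 second timeout and
 * jobs J1 and J2 in flight, only the head of ring_mirror_list (J1)
 * has its timeout work armed.  Once J1's hardware fence signals,
 * amd_sched_job_finish() cancels J1's timer, drops the reference the
 * timer held and arms a fresh 2 second timer on J2, so the watchdog
 * always tracks the oldest unfinished job.
 */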

static void amd_sched_job_begin(struct amd_sched_job *s_job)
{
        struct amd_gpu_scheduler *sched = s_job->sched;
        unsigned long flags;

        spin_lock_irqsave(&sched->job_list_lock, flags);
        list_add_tail(&s_job->node, &sched->ring_mirror_list);
        if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
            list_first_entry_or_null(&sched->ring_mirror_list,
                                     struct amd_sched_job, node) == s_job) {
                amd_sched_job_get(s_job);
                schedule_delayed_work(&s_job->work_tdr, sched->timeout);
        }
        spin_unlock_irqrestore(&sched->job_list_lock, flags);
}

static void amd_sched_job_timedout(struct work_struct *work)
{
        struct amd_sched_job *job = container_of(work, struct amd_sched_job,
                                                 work_tdr.work);

        job->sched->ops->timedout_job(job);
}

/**
 * Submit a job to the job queue
 *
 * @sched_job		The job to submit
 *
 * Blocks until the job could be pushed to the entity's queue.
 */
void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
{
        struct amd_sched_entity *entity = sched_job->s_entity;

        fence_add_callback(&sched_job->s_fence->base,
                           &sched_job->cb_free_job, amd_sched_free_job);
        trace_amd_sched_job(sched_job);
        wait_event(entity->sched->job_scheduled,
                   amd_sched_entity_in(sched_job));
}

/* init a sched_job with its basic fields */
int amd_sched_job_init(struct amd_sched_job *job,
                       struct amd_gpu_scheduler *sched,
                       struct amd_sched_entity *entity,
                       void (*free_cb)(struct kref *refcount),
                       void *owner, struct fence **fence)
{
        INIT_LIST_HEAD(&job->node);
        kref_init(&job->refcount);
        job->sched = sched;
        job->s_entity = entity;
        job->s_fence = amd_sched_fence_create(entity, owner);
        if (!job->s_fence)
                return -ENOMEM;

        job->s_fence->s_job = job;
        INIT_DELAYED_WORK(&job->work_tdr, amd_sched_job_timedout);
        job->free_callback = free_cb;

        if (fence)
                *fence = &job->s_fence->base;
        return 0;
}
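
/*
 * Submission flow sketch (hypothetical backend, for illustration;
 * "my_job", "my_free_cb" and "owner" are assumed names, not part of
 * this file):
 *
 *	r = amd_sched_job_init(&my_job->base, &ring->sched, entity,
 *			       my_free_cb, owner, &fence);
 *	if (r)
 *		return r;
 *	amd_sched_entity_push_job(&my_job->base);
 *
 * push_job blocks until the entity's job_queue has room; the scheduler
 * thread later pops the job and hands it to ops->run_job().
 */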

/**
 * Return true if we can push more jobs to the hw.
 */
static bool amd_sched_ready(struct amd_gpu_scheduler *sched)
{
        return atomic_read(&sched->hw_rq_count) <
                sched->hw_submission_limit;
}

/**
 * Wake up the scheduler when it is ready
 */
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched)
{
        if (amd_sched_ready(sched))
                wake_up_interruptible(&sched->wake_up_worker);
}

/**
 * Select next entity to process
 */
static struct amd_sched_entity *
amd_sched_select_entity(struct amd_gpu_scheduler *sched)
{
        struct amd_sched_entity *entity;
        int i;

        if (!amd_sched_ready(sched))
                return NULL;

        /* Kernel run queue has higher priority than normal run queue */
        for (i = 0; i < AMD_SCHED_MAX_PRIORITY; i++) {
                entity = amd_sched_rq_select_entity(&sched->sched_rq[i]);
                if (entity)
                        break;
        }

        return entity;
}

static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
{
        struct amd_sched_fence *s_fence =
                container_of(cb, struct amd_sched_fence, cb);
        struct amd_gpu_scheduler *sched = s_fence->sched;

        atomic_dec(&sched->hw_rq_count);

        amd_sched_job_finish(s_fence->s_job);

        amd_sched_fence_signal(s_fence);

        trace_amd_sched_process_job(s_fence);
        fence_put(&s_fence->base);
        wake_up_interruptible(&sched->wake_up_worker);
}

static int amd_sched_main(void *param)
{
        struct sched_param sparam = {.sched_priority = 1};
        struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;
        int r, count;

        sched_setscheduler(current, SCHED_FIFO, &sparam);

        while (!kthread_should_stop()) {
                struct amd_sched_entity *entity;
                struct amd_sched_fence *s_fence;
                struct amd_sched_job *sched_job;
                struct fence *fence;

                wait_event_interruptible(sched->wake_up_worker,
                        (entity = amd_sched_select_entity(sched)) ||
                        kthread_should_stop());

                if (!entity)
                        continue;

                sched_job = amd_sched_entity_pop_job(entity);
                if (!sched_job)
                        continue;

                s_fence = sched_job->s_fence;

                atomic_inc(&sched->hw_rq_count);
                amd_sched_job_begin(sched_job);

                fence = sched->ops->run_job(sched_job);
                amd_sched_fence_scheduled(s_fence);
                if (fence) {
                        r = fence_add_callback(fence, &s_fence->cb,
                                               amd_sched_process_job);
                        if (r == -ENOENT)
                                amd_sched_process_job(fence, &s_fence->cb);
                        else if (r)
                                DRM_ERROR("fence add callback failed (%d)\n",
                                          r);
                        fence_put(fence);
                } else {
                        DRM_ERROR("Failed to run job!\n");
                        amd_sched_process_job(NULL, &s_fence->cb);
                }

                count = kfifo_out(&entity->job_queue, &sched_job,
                                  sizeof(sched_job));
                WARN_ON(count != sizeof(sched_job));
                wake_up(&sched->job_scheduled);
        }
        return 0;
}

/**
 * Init a gpu scheduler instance
 *
 * @sched		The pointer to the scheduler
 * @ops			The backend operations for this scheduler.
 * @hw_submission	Number of hw submissions that can be in flight.
 * @timeout		Job timeout in jiffies, or MAX_SCHEDULE_TIMEOUT to
 *			disable the timeout handler.
 * @name		Name used for debugging
 *
 * Return 0 on success, otherwise error code.
 */
int amd_sched_init(struct amd_gpu_scheduler *sched,
                   const struct amd_sched_backend_ops *ops,
                   unsigned hw_submission, long timeout, const char *name)
{
        int i;

        sched->ops = ops;
        sched->hw_submission_limit = hw_submission;
        sched->name = name;
        sched->timeout = timeout;
        for (i = 0; i < AMD_SCHED_MAX_PRIORITY; i++)
                amd_sched_rq_init(&sched->sched_rq[i]);

        init_waitqueue_head(&sched->wake_up_worker);
        init_waitqueue_head(&sched->job_scheduled);
        INIT_LIST_HEAD(&sched->ring_mirror_list);
        spin_lock_init(&sched->job_list_lock);
        atomic_set(&sched->hw_rq_count, 0);
        if (atomic_inc_return(&sched_fence_slab_ref) == 1) {
                sched_fence_slab = kmem_cache_create(
                        "amd_sched_fence", sizeof(struct amd_sched_fence), 0,
                        SLAB_HWCACHE_ALIGN, NULL);
                if (!sched_fence_slab)
                        return -ENOMEM;
        }

        /* Each scheduler will run on a separate kernel thread */
        sched->thread = kthread_run(amd_sched_main, sched, sched->name);
        if (IS_ERR(sched->thread)) {
                DRM_ERROR("Failed to create scheduler for %s.\n", name);
                return PTR_ERR(sched->thread);
        }

        return 0;
}
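
/*
 * Setup sketch (hypothetical driver code, for illustration; "ring",
 * "my_ops" and "timeout_ms" are assumed names, not part of this file):
 *
 *	r = amd_sched_init(&ring->sched, &my_ops, hw_submission,
 *			   msecs_to_jiffies(timeout_ms), ring->name);
 *	if (r)
 *		return r;
 *	...
 *	amd_sched_fini(&ring->sched);
 *
 * Passing MAX_SCHEDULE_TIMEOUT as @timeout disables the TDR handler
 * entirely.
 */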

/**
 * Destroy a gpu scheduler
 *
 * @sched	The pointer to the scheduler
 */
void amd_sched_fini(struct amd_gpu_scheduler *sched)
{
        if (sched->thread)
                kthread_stop(sched->thread);
        if (atomic_dec_and_test(&sched_fence_slab_ref))
                kmem_cache_destroy(sched_fence_slab);
}