/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <drm/drmP.h>
#include "gpu_scheduler.h"

#define CREATE_TRACE_POINTS
#include "gpu_sched_trace.h"

static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity);
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);

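/*
 * Slab for scheduler fences, shared between all scheduler instances:
 * created by the first amd_sched_init() and destroyed again by the
 * last amd_sched_fini().
 */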
struct kmem_cache *sched_fence_slab;
atomic_t sched_fence_slab_ref = ATOMIC_INIT(0);

/* Initialize a given run queue struct */
static void amd_sched_rq_init(struct amd_sched_rq *rq)
{
        spin_lock_init(&rq->lock);
        INIT_LIST_HEAD(&rq->entities);
        rq->current_entity = NULL;
}

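/**
 * Add an entity to the tail of the run queue
 *
 * @rq		The run queue to add to.
 * @entity	The entity to add.
 */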
static void amd_sched_rq_add_entity(struct amd_sched_rq *rq,
                                    struct amd_sched_entity *entity)
{
        spin_lock(&rq->lock);
        list_add_tail(&entity->list, &rq->entities);
        spin_unlock(&rq->lock);
}

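/**
 * Remove an entity from the run queue, resetting the round robin
 * pointer if it pointed to this entity
 *
 * @rq		The run queue to remove from.
 * @entity	The entity to remove.
 */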
static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
                                       struct amd_sched_entity *entity)
{
        spin_lock(&rq->lock);
        list_del_init(&entity->list);
        if (rq->current_entity == entity)
                rq->current_entity = NULL;
        spin_unlock(&rq->lock);
}

/**
 * Select an entity which could provide a job to run
 *
 * @rq		The run queue to check.
 *
 * Try to find a ready entity, returns NULL if none found.
 */
static struct amd_sched_entity *
amd_sched_rq_select_entity(struct amd_sched_rq *rq)
{
        struct amd_sched_entity *entity;

        spin_lock(&rq->lock);

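        /* Continue the round robin after the entity scheduled last time */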
        entity = rq->current_entity;
        if (entity) {
                list_for_each_entry_continue(entity, &rq->entities, list) {
                        if (amd_sched_entity_is_ready(entity)) {
                                rq->current_entity = entity;
                                spin_unlock(&rq->lock);
                                return entity;
                        }
                }
        }

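        /*
         * Nothing after the current entity was ready, search again from
         * the head of the list until we wrap around to it.
         */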
        list_for_each_entry(entity, &rq->entities, list) {
                if (amd_sched_entity_is_ready(entity)) {
                        rq->current_entity = entity;
                        spin_unlock(&rq->lock);
                        return entity;
                }

                if (entity == rq->current_entity)
                        break;
        }

        spin_unlock(&rq->lock);

        return NULL;
}

/**
 * Init a context entity used by scheduler when submit to HW ring.
 *
 * @sched	The pointer to the scheduler
 * @entity	The pointer to a valid amd_sched_entity
 * @rq		The run queue this entity belongs to
 * @jobs	The max number of jobs in the job queue
 *
 * Return 0 on success, negative error code on failure.
 */
int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
                          struct amd_sched_entity *entity,
                          struct amd_sched_rq *rq,
                          uint32_t jobs)
{
        int r;

        if (!(sched && entity && rq))
                return -EINVAL;

        memset(entity, 0, sizeof(struct amd_sched_entity));
        INIT_LIST_HEAD(&entity->list);
        entity->rq = rq;
        entity->sched = sched;

        spin_lock_init(&entity->queue_lock);
        r = kfifo_alloc(&entity->job_queue, jobs * sizeof(void *), GFP_KERNEL);
        if (r)
                return r;

        atomic_set(&entity->fence_seq, 0);
        entity->fence_context = fence_context_alloc(1);

        /* Add the entity to the run queue */
        amd_sched_rq_add_entity(rq, entity);

        return 0;
}

/**
 * Query if entity is initialized
 *
 * @sched	Pointer to scheduler instance
 * @entity	The pointer to a valid scheduler entity
 *
 * Return true if entity is initialized, false otherwise.
 */
static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched,
                                            struct amd_sched_entity *entity)
{
        return entity->sched == sched &&
                entity->rq != NULL;
}

/**
 * Check if entity is idle
 *
 * @entity	The pointer to a valid scheduler entity
 *
 * Return true if the entity doesn't have any unscheduled jobs.
 */
static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
{
        rmb();
        if (kfifo_is_empty(&entity->job_queue))
                return true;

        return false;
}

/**
 * Check if entity is ready
 *
 * @entity	The pointer to a valid scheduler entity
 *
 * Return true if entity could provide a job.
 */
static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity)
{
        if (kfifo_is_empty(&entity->job_queue))
                return false;

        if (ACCESS_ONCE(entity->dependency))
                return false;

        return true;
}

/**
 * Destroy a context entity
 *
 * @sched	Pointer to scheduler instance
 * @entity	The pointer to a valid scheduler entity
 *
 * Cleanup and free the allocated resources.
 */
void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
                           struct amd_sched_entity *entity)
{
        struct amd_sched_rq *rq = entity->rq;

        if (!amd_sched_entity_is_initialized(sched, entity))
                return;

        /*
         * The client will not queue more IBs during this fini; consume
         * the existing queued IBs.
         */
        wait_event(sched->job_scheduled, amd_sched_entity_is_idle(entity));

        amd_sched_rq_remove_entity(rq, entity);
        kfifo_free(&entity->job_queue);
}

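/**
 * Callback for when the fence an entity depends on is done: clear the
 * dependency and wake up the scheduler so the entity is considered
 * runnable again.
 */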
static void amd_sched_entity_wakeup(struct fence *f, struct fence_cb *cb)
{
        struct amd_sched_entity *entity =
                container_of(cb, struct amd_sched_entity, cb);
        entity->dependency = NULL;
        fence_put(f);
        amd_sched_wakeup(entity->sched);
}

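/**
 * Install a callback for the current dependency of an entity
 *
 * Fences from the entity's own context are ignored. Fences from the
 * same scheduler only need to wait until their job is scheduled, not
 * finished; everything else gets a regular fence callback.
 *
 * Returns true if a callback was installed and the entity has to wait,
 * false if the dependency can be dropped right away.
 */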
static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
{
        struct amd_gpu_scheduler *sched = entity->sched;
        struct fence *fence = entity->dependency;
        struct amd_sched_fence *s_fence;

        if (fence->context == entity->fence_context) {
                /* We can ignore fences from ourselves */
                fence_put(entity->dependency);
                return false;
        }

        s_fence = to_amd_sched_fence(fence);
        if (s_fence && s_fence->sched == sched) {
                /* Fence is from the same scheduler */
                if (test_bit(AMD_SCHED_FENCE_SCHEDULED_BIT, &fence->flags)) {
                        /* Ignore it when it is already scheduled */
                        fence_put(entity->dependency);
                        return false;
                }

                /* Wait for fence to be scheduled */
                entity->cb.func = amd_sched_entity_wakeup;
                list_add_tail(&entity->cb.node, &s_fence->scheduled_cb);
                return true;
        }

        if (!fence_add_callback(entity->dependency, &entity->cb,
                                amd_sched_entity_wakeup))
                return true;

        fence_put(entity->dependency);
        return false;
}

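/**
 * Peek at the next job of an entity without removing it from the queue
 *
 * Returns NULL if the queue is empty or the job still has an
 * unscheduled dependency; in the latter case a wakeup callback has
 * already been installed by amd_sched_entity_add_dependency_cb().
 */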
static struct amd_sched_job *
amd_sched_entity_pop_job(struct amd_sched_entity *entity)
{
        struct amd_gpu_scheduler *sched = entity->sched;
        struct amd_sched_job *sched_job;

        if (!kfifo_out_peek(&entity->job_queue, &sched_job, sizeof(sched_job)))
                return NULL;

        while ((entity->dependency = sched->ops->dependency(sched_job)))
                if (amd_sched_entity_add_dependency_cb(entity))
                        return NULL;

        return sched_job;
}

/**
 * Helper to submit a job to the job queue
 *
 * @sched_job	The pointer to the job to submit
 *
 * Returns true if we could submit the job.
 */
static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
{
        struct amd_gpu_scheduler *sched = sched_job->sched;
        struct amd_sched_entity *entity = sched_job->s_entity;
        bool added, first = false;

        spin_lock(&entity->queue_lock);
        added = kfifo_in(&entity->job_queue, &sched_job,
                         sizeof(sched_job)) == sizeof(sched_job);

        if (added && kfifo_len(&entity->job_queue) == sizeof(sched_job))
                first = true;

        spin_unlock(&entity->queue_lock);

        /* first job wakes up scheduler */
        if (first)
                amd_sched_wakeup(sched);

        return added;
}

/**
 * Submit a job to the job queue
 *
 * @sched_job	The pointer to the job to submit
 *
 * Blocks until the job could be pushed to the entity's job queue.
 */
void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
{
        struct amd_sched_entity *entity = sched_job->s_entity;

        trace_amd_sched_job(sched_job);
        wait_event(entity->sched->job_scheduled,
                   amd_sched_entity_in(sched_job));
}

/**
 * Return true if we can push more jobs to the hw.
 */
static bool amd_sched_ready(struct amd_gpu_scheduler *sched)
{
        return atomic_read(&sched->hw_rq_count) <
                sched->hw_submission_limit;
}

/**
 * Wake up the scheduler when it is ready
 */
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched)
{
        if (amd_sched_ready(sched))
                wake_up_interruptible(&sched->wake_up_worker);
}

/**
 * Select next entity to process
 */
static struct amd_sched_entity *
amd_sched_select_entity(struct amd_gpu_scheduler *sched)
{
        struct amd_sched_entity *entity;
        int i;

        if (!amd_sched_ready(sched))
                return NULL;

        /* Kernel run queue has higher priority than normal run queue */
        for (i = 0; i < AMD_SCHED_MAX_PRIORITY; i++) {
                entity = amd_sched_rq_select_entity(&sched->sched_rq[i]);
                if (entity)
                        break;
        }

        return entity;
}

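/**
 * Callback for when a job finished on the hardware: frees up a hw
 * submission slot, signals the scheduler fence, cancels the timeout
 * work and wakes up the scheduler thread.
 */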
static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
{
        struct amd_sched_fence *s_fence =
                container_of(cb, struct amd_sched_fence, cb);
        struct amd_gpu_scheduler *sched = s_fence->sched;
        unsigned long flags;

        atomic_dec(&sched->hw_rq_count);
        amd_sched_fence_signal(s_fence);
        if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
                cancel_delayed_work(&s_fence->dwork);
                spin_lock_irqsave(&sched->fence_list_lock, flags);
                list_del_init(&s_fence->list);
                spin_unlock_irqrestore(&sched->fence_list_lock, flags);
        }
        trace_amd_sched_process_job(s_fence);
        fence_put(&s_fence->base);
        wake_up_interruptible(&sched->wake_up_worker);
}

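/**
 * Delayed work that fires when a job didn't complete within
 * sched->timeout: complains and cleans up all still pending fences of
 * the scheduler.
 */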
static void amd_sched_fence_work_func(struct work_struct *work)
{
        struct amd_sched_fence *s_fence =
                container_of(work, struct amd_sched_fence, dwork.work);
        struct amd_gpu_scheduler *sched = s_fence->sched;
        struct amd_sched_fence *entity, *tmp;
        unsigned long flags;

        DRM_ERROR("[%s] scheduler timed out!\n", sched->name);

        /* Clean all pending fences */
        spin_lock_irqsave(&sched->fence_list_lock, flags);
        list_for_each_entry_safe(entity, tmp, &sched->fence_list, list) {
                DRM_ERROR("  fence no %d\n", entity->base.seqno);
                cancel_delayed_work(&entity->dwork);
                list_del_init(&entity->list);
                fence_put(&entity->base);
        }
        spin_unlock_irqrestore(&sched->fence_list_lock, flags);
}

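/**
 * Main scheduler thread: picks a ready entity, pops its next job,
 * hands the job to the backend with ops->run_job() and hooks
 * amd_sched_process_job() up to the returned hardware fence.
 */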
static int amd_sched_main(void *param)
{
        struct sched_param sparam = {.sched_priority = 1};
        struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;
        int r, count;

        spin_lock_init(&sched->fence_list_lock);
        INIT_LIST_HEAD(&sched->fence_list);
        sched_setscheduler(current, SCHED_FIFO, &sparam);

        while (!kthread_should_stop()) {
                struct amd_sched_entity *entity;
                struct amd_sched_fence *s_fence;
                struct amd_sched_job *sched_job;
                struct fence *fence;
                unsigned long flags;

                wait_event_interruptible(sched->wake_up_worker,
                        (entity = amd_sched_select_entity(sched)) ||
                        kthread_should_stop());

                if (!entity)
                        continue;

                sched_job = amd_sched_entity_pop_job(entity);
                if (!sched_job)
                        continue;

                s_fence = sched_job->s_fence;

                if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
                        INIT_DELAYED_WORK(&s_fence->dwork, amd_sched_fence_work_func);
                        schedule_delayed_work(&s_fence->dwork, sched->timeout);
                        spin_lock_irqsave(&sched->fence_list_lock, flags);
                        list_add_tail(&s_fence->list, &sched->fence_list);
                        spin_unlock_irqrestore(&sched->fence_list_lock, flags);
                }

                atomic_inc(&sched->hw_rq_count);
                fence = sched->ops->run_job(sched_job);
                amd_sched_fence_scheduled(s_fence);
                if (fence) {
                        r = fence_add_callback(fence, &s_fence->cb,
                                               amd_sched_process_job);
                        if (r == -ENOENT)
                                amd_sched_process_job(fence, &s_fence->cb);
                        else if (r)
                                DRM_ERROR("fence add callback failed (%d)\n", r);
                        fence_put(fence);
                } else {
                        DRM_ERROR("Failed to run job!\n");
                        amd_sched_process_job(NULL, &s_fence->cb);
                }

                count = kfifo_out(&entity->job_queue, &sched_job,
                                  sizeof(sched_job));
                WARN_ON(count != sizeof(sched_job));
                wake_up(&sched->job_scheduled);
        }
        return 0;
}

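/*
 * Rough usage sketch from a driver's point of view; the run queue
 * choice, job allocation and fence setup are elided and the names
 * below are only placeholders (the real call sites live in the amdgpu
 * driver):
 *
 *	struct amd_gpu_scheduler sched;
 *	struct amd_sched_entity entity;
 *
 *	amd_sched_init(&sched, &my_backend_ops, 2, MAX_SCHEDULE_TIMEOUT,
 *		       "ring0");
 *	amd_sched_entity_init(&sched, &entity, &sched.sched_rq[...], 32);
 *
 *	job->sched = &sched;
 *	job->s_entity = &entity;
 *	job->s_fence = ...;
 *	amd_sched_entity_push_job(job);
 *
 *	amd_sched_entity_fini(&sched, &entity);
 *	amd_sched_fini(&sched);
 */
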
/**
 * Init a gpu scheduler instance
 *
 * @sched		The pointer to the scheduler
 * @ops			The backend operations for this scheduler.
 * @hw_submission	Number of hw submissions that can be in flight.
 * @timeout		Job timeout in jiffies, MAX_SCHEDULE_TIMEOUT to disable.
 * @name		Name used for debugging
 *
 * Return 0 on success, otherwise error code.
 */
int amd_sched_init(struct amd_gpu_scheduler *sched,
                   struct amd_sched_backend_ops *ops,
                   unsigned hw_submission, long timeout, const char *name)
{
        int i;

        sched->ops = ops;
        sched->hw_submission_limit = hw_submission;
        sched->name = name;
        sched->timeout = timeout;
        for (i = 0; i < AMD_SCHED_MAX_PRIORITY; i++)
                amd_sched_rq_init(&sched->sched_rq[i]);

        init_waitqueue_head(&sched->wake_up_worker);
        init_waitqueue_head(&sched->job_scheduled);
        atomic_set(&sched->hw_rq_count, 0);
        if (atomic_inc_return(&sched_fence_slab_ref) == 1) {
                sched_fence_slab = kmem_cache_create(
                        "amd_sched_fence", sizeof(struct amd_sched_fence), 0,
                        SLAB_HWCACHE_ALIGN, NULL);
                if (!sched_fence_slab)
                        return -ENOMEM;
        }

        /* Each scheduler will run on a separate kernel thread */
        sched->thread = kthread_run(amd_sched_main, sched, sched->name);
        if (IS_ERR(sched->thread)) {
                DRM_ERROR("Failed to create scheduler for %s.\n", name);
                return PTR_ERR(sched->thread);
        }

        return 0;
}

/**
 * Destroy a gpu scheduler
 *
 * @sched	The pointer to the scheduler
 */
void amd_sched_fini(struct amd_gpu_scheduler *sched)
{
        if (sched->thread)
                kthread_stop(sched->thread);
        if (atomic_dec_and_test(&sched_fence_slab_ref))
                kmem_cache_destroy(sched_fence_slab);
}