/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <drm/drmP.h>
#include "gpu_scheduler.h"

#define CREATE_TRACE_POINTS
#include "gpu_sched_trace.h"

static struct amd_sched_job *
amd_sched_entity_pop_job(struct amd_sched_entity *entity);
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);

/* Initialize a given run queue struct */
static void amd_sched_rq_init(struct amd_sched_rq *rq)
{
	spin_lock_init(&rq->lock);
	INIT_LIST_HEAD(&rq->entities);
	rq->current_entity = NULL;
}

static void amd_sched_rq_add_entity(struct amd_sched_rq *rq,
				    struct amd_sched_entity *entity)
{
	spin_lock(&rq->lock);
	list_add_tail(&entity->list, &rq->entities);
	spin_unlock(&rq->lock);
}

static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
				       struct amd_sched_entity *entity)
{
	spin_lock(&rq->lock);
	list_del_init(&entity->list);
	if (rq->current_entity == entity)
		rq->current_entity = NULL;
	spin_unlock(&rq->lock);
}

/**
 * Select next job from a specified run queue with round robin policy.
 * Return NULL if nothing available.
 */
static struct amd_sched_job *
amd_sched_rq_select_job(struct amd_sched_rq *rq)
{
	struct amd_sched_entity *entity;
	struct amd_sched_job *sched_job;

	spin_lock(&rq->lock);

	entity = rq->current_entity;
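	/* Resume the scan right after the entity that provided the last
	 * job, so every entity gets its turn (round robin). */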
	if (entity) {
		list_for_each_entry_continue(entity, &rq->entities, list) {
			sched_job = amd_sched_entity_pop_job(entity);
			if (sched_job) {
				rq->current_entity = entity;
				spin_unlock(&rq->lock);
				return sched_job;
			}
		}
	}

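	/* Nothing found after current_entity; scan from the head of the
	 * list and stop once we wrap around to current_entity again. */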
	list_for_each_entry(entity, &rq->entities, list) {

		sched_job = amd_sched_entity_pop_job(entity);
		if (sched_job) {
			rq->current_entity = entity;
			spin_unlock(&rq->lock);
			return sched_job;
		}

		if (entity == rq->current_entity)
			break;
	}

	spin_unlock(&rq->lock);

	return NULL;
}

/**
 * Init a context entity used by the scheduler when submitting jobs
 * to the HW ring.
 *
 * @sched	The pointer to the scheduler
 * @entity	The pointer to a valid amd_sched_entity
 * @rq		The run queue this entity belongs to
 * @jobs	The max number of jobs in the job queue
 *
 * Return 0 on success, negative error code on failure.
 */
int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
			  struct amd_sched_entity *entity,
			  struct amd_sched_rq *rq,
			  uint32_t jobs)
{
	int r;

	if (!(sched && entity && rq))
		return -EINVAL;

	memset(entity, 0, sizeof(struct amd_sched_entity));
	INIT_LIST_HEAD(&entity->list);
	entity->rq = rq;
	entity->sched = sched;

	spin_lock_init(&entity->queue_lock);
	r = kfifo_alloc(&entity->job_queue, jobs * sizeof(void *), GFP_KERNEL);
	if (r)
		return r;

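	/* Each entity gets its own fence context, so its scheduler fences
	 * form an independent, ordered sequence. */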
	atomic_set(&entity->fence_seq, 0);
	entity->fence_context = fence_context_alloc(1);

	/* Add the entity to the run queue */
	amd_sched_rq_add_entity(rq, entity);

	return 0;
}

/**
 * Query if entity is initialized
 *
 * @sched	Pointer to scheduler instance
 * @entity	The pointer to a valid scheduler entity
 *
 * return true if entity is initialized, false otherwise
 */
static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched,
					    struct amd_sched_entity *entity)
{
	return entity->sched == sched &&
		entity->rq != NULL;
}

/**
 * Check if entity is idle
 *
 * @entity	The pointer to a valid scheduler entity
 *
 * Return true if the entity doesn't have any unscheduled jobs.
 */
static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
{
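	/* Make sure the latest job_queue updates are visible before
	 * checking whether it is empty. */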
	rmb();
	if (kfifo_is_empty(&entity->job_queue))
		return true;

	return false;
}

/**
 * Destroy a context entity
 *
 * @sched	Pointer to scheduler instance
 * @entity	The pointer to a valid scheduler entity
 *
 * Cleanup and free the allocated resources.
 */
void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
			   struct amd_sched_entity *entity)
{
	struct amd_sched_rq *rq = entity->rq;

	if (!amd_sched_entity_is_initialized(sched, entity))
		return;

	/*
	 * The client will not queue more IBs during this fini, consume existing
	 * queued IBs
	 */
	wait_event(sched->job_scheduled, amd_sched_entity_is_idle(entity));

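	/* The entity is idle now, so it is safe to take it off the run
	 * queue and free its job FIFO. */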
	amd_sched_rq_remove_entity(rq, entity);
	kfifo_free(&entity->job_queue);
}

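/**
 * Callback for the entity's dependency fence: clear the dependency and
 * wake up the scheduler so it can pick the entity's jobs again.
 */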
static void amd_sched_entity_wakeup(struct fence *f, struct fence_cb *cb)
{
	struct amd_sched_entity *entity =
		container_of(cb, struct amd_sched_entity, cb);
	entity->dependency = NULL;
	fence_put(f);
	amd_sched_wakeup(entity->sched);
}

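/**
 * Peek at the next job of an entity without removing it from the queue.
 * Returns NULL if the entity still waits on a dependency or has nothing
 * queued; the job stays in the FIFO until the scheduler has run it.
 */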
static struct amd_sched_job *
amd_sched_entity_pop_job(struct amd_sched_entity *entity)
{
	struct amd_gpu_scheduler *sched = entity->sched;
	struct amd_sched_job *sched_job;

	if (ACCESS_ONCE(entity->dependency))
		return NULL;

	if (!kfifo_out_peek(&entity->job_queue, &sched_job, sizeof(sched_job)))
		return NULL;

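	/* Ask the backend for the job's next dependency. If it is still
	 * unsignaled, park the entity on it via a fence callback; if the
	 * fence already signaled, drop the reference and try the next one. */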
	while ((entity->dependency = sched->ops->dependency(sched_job))) {

		if (fence_add_callback(entity->dependency, &entity->cb,
				       amd_sched_entity_wakeup))
			fence_put(entity->dependency);
		else
			return NULL;
	}

	return sched_job;
}

/**
 * Helper to submit a job to the job queue
 *
 * @sched_job	The pointer to the job to submit
 *
 * Returns true if we could submit the job.
 */
static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
{
	struct amd_sched_entity *entity = sched_job->s_entity;
	bool added, first = false;

	spin_lock(&entity->queue_lock);
	added = kfifo_in(&entity->job_queue, &sched_job,
			sizeof(sched_job)) == sizeof(sched_job);

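	/* A queue length of exactly one entry means this was the first job,
	 * so the scheduler might be asleep and needs a wake up. */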
	if (added && kfifo_len(&entity->job_queue) == sizeof(sched_job))
		first = true;

	spin_unlock(&entity->queue_lock);

	/* first job wakes up scheduler */
	if (first)
		amd_sched_wakeup(sched_job->sched);

	return added;
}

/**
 * Submit a job to the job queue
 *
 * @sched_job	The pointer to the job to submit
 *
 * Returns 0 for success, negative error code otherwise.
 */
int amd_sched_entity_push_job(struct amd_sched_job *sched_job)
{
	struct amd_sched_entity *entity = sched_job->s_entity;
	struct amd_sched_fence *fence = amd_sched_fence_create(
		entity, sched_job->owner);

	if (!fence)
		return -ENOMEM;

	fence_get(&fence->base);
	sched_job->s_fence = fence;

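	/* Block until the job fits into the entity's job queue; the
	 * scheduler wakes us up whenever it has consumed a job. */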
	wait_event(entity->sched->job_scheduled,
		   amd_sched_entity_in(sched_job));
	trace_amd_sched_job(sched_job);
	return 0;
}

/**
 * Return true if we can push more jobs to the hw.
 */
static bool amd_sched_ready(struct amd_gpu_scheduler *sched)
{
	return atomic_read(&sched->hw_rq_count) <
		sched->hw_submission_limit;
}

/**
 * Wake up the scheduler when it is ready
 */
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched)
{
	if (amd_sched_ready(sched))
		wake_up_interruptible(&sched->wake_up_worker);
}

/**
 * Select the next job to run.
 */
static struct amd_sched_job *
amd_sched_select_job(struct amd_gpu_scheduler *sched)
{
	struct amd_sched_job *sched_job;

	if (!amd_sched_ready(sched))
		return NULL;

	/* Kernel run queue has higher priority than normal run queue */
	sched_job = amd_sched_rq_select_job(&sched->kernel_rq);
	if (sched_job == NULL)
		sched_job = amd_sched_rq_select_job(&sched->sched_rq);

	return sched_job;
}

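/**
 * Fence callback invoked when a job's hardware fence signals (or called
 * directly when no fence was returned): release the HW submission slot,
 * signal the scheduler fence and wake up the worker thread.
 */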
static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
{
	struct amd_sched_fence *s_fence =
		container_of(cb, struct amd_sched_fence, cb);
	struct amd_gpu_scheduler *sched = s_fence->sched;

	atomic_dec(&sched->hw_rq_count);
	amd_sched_fence_signal(s_fence);
	fence_put(&s_fence->base);
	wake_up_interruptible(&sched->wake_up_worker);
}

static int amd_sched_main(void *param)
{
	struct sched_param sparam = {.sched_priority = 1};
	struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;
	int r, count;

	sched_setscheduler(current, SCHED_FIFO, &sparam);

	while (!kthread_should_stop()) {
		struct amd_sched_entity *entity;
		struct amd_sched_fence *s_fence;
		struct amd_sched_job *sched_job;
		struct fence *fence;

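		/* Sleep until a job is ready to run or the thread is asked
		 * to stop. */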
		wait_event_interruptible(sched->wake_up_worker,
			kthread_should_stop() ||
			(sched_job = amd_sched_select_job(sched)));

		if (!sched_job)
			continue;

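		/* Account for the HW submission and hand the job to the
		 * backend. */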
		entity = sched_job->s_entity;
		s_fence = sched_job->s_fence;
		atomic_inc(&sched->hw_rq_count);
		fence = sched->ops->run_job(sched_job);
		if (fence) {
			r = fence_add_callback(fence, &s_fence->cb,
					       amd_sched_process_job);
			if (r == -ENOENT)
				amd_sched_process_job(fence, &s_fence->cb);
			else if (r)
				DRM_ERROR("fence add callback failed (%d)\n", r);
			fence_put(fence);
		} else {
			DRM_ERROR("Failed to run job!\n");
			amd_sched_process_job(NULL, &s_fence->cb);
		}

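		/* The job has been handed to the HW; drop it from the
		 * entity's queue and wake up anyone waiting in
		 * amd_sched_entity_push_job() or amd_sched_entity_fini(). */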
		count = kfifo_out(&entity->job_queue, &sched_job,
				sizeof(sched_job));
		WARN_ON(count != sizeof(sched_job));
		wake_up(&sched->job_scheduled);
	}
	return 0;
}

/**
 * Init a gpu scheduler instance
 *
 * @sched		The pointer to the scheduler
 * @ops			The backend operations for this scheduler.
 * @hw_submission	The max number of HW submissions in flight.
 * @name		Name used for debugging
 *
 * Return 0 on success, otherwise error code.
 */
int amd_sched_init(struct amd_gpu_scheduler *sched,
		   struct amd_sched_backend_ops *ops,
		   unsigned hw_submission, const char *name)
{
	sched->ops = ops;
	sched->hw_submission_limit = hw_submission;
	sched->name = name;
	amd_sched_rq_init(&sched->sched_rq);
	amd_sched_rq_init(&sched->kernel_rq);

	init_waitqueue_head(&sched->wake_up_worker);
	init_waitqueue_head(&sched->job_scheduled);
	atomic_set(&sched->hw_rq_count, 0);

	/* Each scheduler will run on a separate kernel thread */
	sched->thread = kthread_run(amd_sched_main, sched, sched->name);
	if (IS_ERR(sched->thread)) {
		DRM_ERROR("Failed to create scheduler for %s.\n", name);
		return PTR_ERR(sched->thread);
	}

	return 0;
}

/**
 * Destroy a gpu scheduler
 *
 * @sched	The pointer to the scheduler
 */
void amd_sched_fini(struct amd_gpu_scheduler *sched)
{
	kthread_stop(sched->thread);
}