/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <drm/drmP.h>
#include "gpu_scheduler.h"

#define CREATE_TRACE_POINTS
#include "gpu_sched_trace.h"

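/*
 * Overview: a driver creates one scheduler per ring with amd_sched_create()
 * and registers entities on its run queues with amd_sched_entity_init().
 * Jobs pushed with amd_sched_entity_push_job() are consumed by the
 * scheduler's kernel thread (amd_sched_main), which selects entities in
 * round robin fashion and hands their jobs to the hardware through the
 * backend ops.
 */
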
static struct amd_sched_job *
amd_sched_entity_pop_job(struct amd_sched_entity *entity);
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);

/* Initialize a given run queue struct */
static void amd_sched_rq_init(struct amd_sched_rq *rq)
{
	spin_lock_init(&rq->lock);
	INIT_LIST_HEAD(&rq->entities);
	rq->current_entity = NULL;
}

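/* Add an entity to the tail of the run queue */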
static void amd_sched_rq_add_entity(struct amd_sched_rq *rq,
				    struct amd_sched_entity *entity)
{
	spin_lock(&rq->lock);
	list_add_tail(&entity->list, &rq->entities);
	spin_unlock(&rq->lock);
}

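/*
 * Remove an entity from the run queue; if it was the current round robin
 * position, that position is reset.
 */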
static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
				       struct amd_sched_entity *entity)
{
	spin_lock(&rq->lock);
	list_del_init(&entity->list);
	if (rq->current_entity == entity)
		rq->current_entity = NULL;
	spin_unlock(&rq->lock);
}

/**
 * Select the next job from a specified run queue with round robin policy.
 * Return NULL if nothing is available.
 */
static struct amd_sched_job *
amd_sched_rq_select_job(struct amd_sched_rq *rq)
{
	struct amd_sched_entity *entity;
	struct amd_sched_job *sched_job;

	spin_lock(&rq->lock);

	entity = rq->current_entity;
	if (entity) {
		list_for_each_entry_continue(entity, &rq->entities, list) {
			sched_job = amd_sched_entity_pop_job(entity);
			if (sched_job) {
				rq->current_entity = entity;
				spin_unlock(&rq->lock);
				return sched_job;
			}
		}
	}

	list_for_each_entry(entity, &rq->entities, list) {

		sched_job = amd_sched_entity_pop_job(entity);
		if (sched_job) {
			rq->current_entity = entity;
			spin_unlock(&rq->lock);
			return sched_job;
		}

		if (entity == rq->current_entity)
			break;
	}

	spin_unlock(&rq->lock);

	return NULL;
}

/**
 * Init a context entity used by the scheduler when submitting to the HW ring.
 *
 * @sched	The pointer to the scheduler
 * @entity	The pointer to a valid amd_sched_entity
 * @rq		The run queue this entity belongs to
 * @jobs	The max number of jobs in the job queue
 *
 * Return 0 on success, negative error code on failure.
 */
int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
			  struct amd_sched_entity *entity,
			  struct amd_sched_rq *rq,
			  uint32_t jobs)
{
	if (!(sched && entity && rq))
		return -EINVAL;

	memset(entity, 0, sizeof(struct amd_sched_entity));
	entity->belongto_rq = rq;
	entity->scheduler = sched;
	entity->fence_context = fence_context_alloc(1);
	if (kfifo_alloc(&entity->job_queue,
			jobs * sizeof(void *),
			GFP_KERNEL))
		return -ENOMEM;

	spin_lock_init(&entity->queue_lock);
	atomic_set(&entity->fence_seq, 0);

	/* Add the entity to the run queue */
	amd_sched_rq_add_entity(rq, entity);
	return 0;
}

/**
 * Query if entity is initialized
 *
 * @sched	Pointer to scheduler instance
 * @entity	The pointer to a valid scheduler entity
 *
 * Return true if the entity is initialized, false otherwise.
 */
static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched,
					    struct amd_sched_entity *entity)
{
	return entity->scheduler == sched &&
		entity->belongto_rq != NULL;
}

/**
 * Check if entity is idle
 *
 * @entity	The pointer to a valid scheduler entity
 *
 * Return true if the entity doesn't have any unscheduled jobs.
 */
static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
{
	rmb();
	if (kfifo_is_empty(&entity->job_queue))
		return true;

	return false;
}

/**
 * Destroy a context entity
 *
 * @sched	Pointer to scheduler instance
 * @entity	The pointer to a valid scheduler entity
 *
 * Cleanup and free the allocated resources.
 */
void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
			   struct amd_sched_entity *entity)
{
	struct amd_sched_rq *rq = entity->belongto_rq;

	if (!amd_sched_entity_is_initialized(sched, entity))
		return;

	/*
	 * The client will not queue more IBs during this fini, consume
	 * existing queued IBs
	 */
	wait_event(sched->job_scheduled, amd_sched_entity_is_idle(entity));

	amd_sched_rq_remove_entity(rq, entity);
	kfifo_free(&entity->job_queue);
}

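/*
 * Callback for the dependency fence: clear the dependency and wake up
 * the scheduler so the entity's jobs are considered again.
 */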
static void amd_sched_entity_wakeup(struct fence *f, struct fence_cb *cb)
{
	struct amd_sched_entity *entity =
		container_of(cb, struct amd_sched_entity, cb);
	entity->dependency = NULL;
	fence_put(f);
	amd_sched_wakeup(entity->scheduler);
}

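/*
 * Peek at the entity's next job without removing it from the queue.
 *
 * Returns NULL if the queue is empty or the job still has an unsignaled
 * dependency; in the latter case amd_sched_entity_wakeup is installed as
 * callback so the scheduler retries once the dependency fence signals.
 */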
static struct amd_sched_job *
amd_sched_entity_pop_job(struct amd_sched_entity *entity)
{
	struct amd_gpu_scheduler *sched = entity->scheduler;
	struct amd_sched_job *sched_job;

	if (ACCESS_ONCE(entity->dependency))
		return NULL;

	if (!kfifo_out_peek(&entity->job_queue, &sched_job, sizeof(sched_job)))
		return NULL;

	while ((entity->dependency = sched->ops->dependency(sched_job))) {
		if (fence_add_callback(entity->dependency, &entity->cb,
				       amd_sched_entity_wakeup))
			fence_put(entity->dependency);
		else
			return NULL;
	}

	return sched_job;
}

/**
 * Helper to submit a job to the job queue
 *
 * @sched_job		The pointer to the job to submit
 *
 * Returns true if we could submit the job.
 */
static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
{
	struct amd_sched_entity *entity = sched_job->s_entity;
	bool added, first = false;

	spin_lock(&entity->queue_lock);
	added = kfifo_in(&entity->job_queue, &sched_job,
			 sizeof(sched_job)) == sizeof(sched_job);

	if (added && kfifo_len(&entity->job_queue) == sizeof(sched_job))
		first = true;

	spin_unlock(&entity->queue_lock);

	/* first job wakes up scheduler */
	if (first)
		amd_sched_wakeup(sched_job->sched);

	return added;
}

/**
 * Submit a job to the job queue
 *
 * @sched_job		The pointer to the job to submit
 *
 * Returns 0 for success, negative error code otherwise.
 */
int amd_sched_entity_push_job(struct amd_sched_job *sched_job)
{
	struct amd_sched_entity *entity = sched_job->s_entity;
	struct amd_sched_fence *fence = amd_sched_fence_create(
		entity, sched_job->owner);

	if (!fence)
		return -ENOMEM;

	fence_get(&fence->base);
	sched_job->s_fence = fence;

	wait_event(entity->scheduler->job_scheduled,
		   amd_sched_entity_in(sched_job));
	trace_amd_sched_job(sched_job);
	return 0;
}

/**
 * Return true if we can push more jobs to the hw.
 */
static bool amd_sched_ready(struct amd_gpu_scheduler *sched)
{
	return atomic_read(&sched->hw_rq_count) <
		sched->hw_submission_limit;
}

/**
 * Wake up the scheduler when it is ready
 */
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched)
{
	if (amd_sched_ready(sched))
		wake_up_interruptible(&sched->wake_up_worker);
}

/**
 * Select the next job to run.
 */
static struct amd_sched_job *
amd_sched_select_job(struct amd_gpu_scheduler *sched)
{
	struct amd_sched_job *sched_job;

	if (!amd_sched_ready(sched))
		return NULL;

	/* Kernel run queue has higher priority than normal run queue */
	sched_job = amd_sched_rq_select_job(&sched->kernel_rq);
	if (sched_job == NULL)
		sched_job = amd_sched_rq_select_job(&sched->sched_rq);

	return sched_job;
}

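/*
 * Callback for the hardware fence: a job finished on the hardware.
 * Drop the hw run count, signal the scheduler fence and wake up the
 * worker thread so the next job can be submitted.
 */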
static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
{
	struct amd_sched_fence *s_fence =
		container_of(cb, struct amd_sched_fence, cb);
	struct amd_gpu_scheduler *sched = s_fence->scheduler;

	atomic_dec(&sched->hw_rq_count);
	amd_sched_fence_signal(s_fence);
	fence_put(&s_fence->base);
	wake_up_interruptible(&sched->wake_up_worker);
}

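/*
 * The main scheduler thread: wait until the hardware has room and a job
 * is available, hand the job to the backend's run_job and install
 * amd_sched_process_job as callback on the returned hardware fence.
 */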
static int amd_sched_main(void *param)
{
	struct sched_param sparam = {.sched_priority = 1};
	struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;
	int r, count;

	sched_setscheduler(current, SCHED_FIFO, &sparam);

	while (!kthread_should_stop()) {
		struct amd_sched_entity *entity;
		struct amd_sched_fence *s_fence;
		struct amd_sched_job *sched_job;
		struct fence *fence;

		wait_event_interruptible(sched->wake_up_worker,
			kthread_should_stop() ||
			(sched_job = amd_sched_select_job(sched)));

		if (!sched_job)
			continue;

		entity = sched_job->s_entity;
		s_fence = sched_job->s_fence;
		atomic_inc(&sched->hw_rq_count);
		fence = sched->ops->run_job(sched_job);
		if (fence) {
			r = fence_add_callback(fence, &s_fence->cb,
					       amd_sched_process_job);
			if (r == -ENOENT)
				amd_sched_process_job(fence, &s_fence->cb);
			else if (r)
				DRM_ERROR("fence add callback failed (%d)\n", r);
			fence_put(fence);
		} else {
			DRM_ERROR("Failed to run job!\n");
			amd_sched_process_job(NULL, &s_fence->cb);
		}

		count = kfifo_out(&entity->job_queue, &sched_job,
				  sizeof(sched_job));
		WARN_ON(count != sizeof(sched_job));
		wake_up(&sched->job_scheduled);
	}
	return 0;
}

/**
 * Create a gpu scheduler
 *
 * @ops			The backend operations for this scheduler.
 * @ring		The ring id for the scheduler.
 * @hw_submission	Number of hw submissions to do.
 * @priv		Private driver pointer stored in the scheduler.
 *
 * Return the pointer to the scheduler for success, otherwise return NULL.
 */
struct amd_gpu_scheduler *amd_sched_create(struct amd_sched_backend_ops *ops,
					   unsigned ring, unsigned hw_submission,
					   void *priv)
{
	struct amd_gpu_scheduler *sched;

	sched = kzalloc(sizeof(struct amd_gpu_scheduler), GFP_KERNEL);
	if (!sched)
		return NULL;

	sched->ops = ops;
	sched->ring_id = ring;
	sched->hw_submission_limit = hw_submission;
	sched->priv = priv;
	snprintf(sched->name, sizeof(sched->name), "amdgpu[%d]", ring);
	amd_sched_rq_init(&sched->sched_rq);
	amd_sched_rq_init(&sched->kernel_rq);

	init_waitqueue_head(&sched->wake_up_worker);
	init_waitqueue_head(&sched->job_scheduled);
	atomic_set(&sched->hw_rq_count, 0);
	/* Each scheduler will run on a separate kernel thread */
	sched->thread = kthread_run(amd_sched_main, sched, sched->name);
	if (IS_ERR(sched->thread)) {
		DRM_ERROR("Failed to create scheduler for id %d.\n", ring);
		kfree(sched);
		return NULL;
	}

	return sched;
}

/**
 * Destroy a gpu scheduler
 *
 * @sched	The pointer to the scheduler
 *
 * Return 0; the function currently cannot fail.
 */
int amd_sched_destroy(struct amd_gpu_scheduler *sched)
{
	kthread_stop(sched->thread);
	kfree(sched);
	return 0;
}