/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <drm/drmP.h>
#include "gpu_scheduler.h"

/* Initialize a given run queue struct */
static void amd_sched_rq_init(struct amd_sched_rq *rq)
{
	spin_lock_init(&rq->lock);
	INIT_LIST_HEAD(&rq->entities);
	rq->current_entity = NULL;
}

static void amd_sched_rq_add_entity(struct amd_sched_rq *rq,
				    struct amd_sched_entity *entity)
{
	spin_lock(&rq->lock);
	list_add_tail(&entity->list, &rq->entities);
	spin_unlock(&rq->lock);
}

static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
				       struct amd_sched_entity *entity)
{
	spin_lock(&rq->lock);
	list_del_init(&entity->list);
	if (rq->current_entity == entity)
		rq->current_entity = NULL;
	spin_unlock(&rq->lock);
}

/**
 * Select the next entity from a run queue with a round-robin policy.
 * May return the same entity as the current one if it is the only
 * available entity in the queue. Returns NULL if nothing is available.
 */
static struct amd_sched_entity *
amd_sched_rq_select_entity(struct amd_sched_rq *rq)
{
	struct amd_sched_entity *entity;

	spin_lock(&rq->lock);

	entity = rq->current_entity;
	if (entity) {
		list_for_each_entry_continue(entity, &rq->entities, list) {
			if (!kfifo_is_empty(&entity->job_queue)) {
				rq->current_entity = entity;
				spin_unlock(&rq->lock);
				return rq->current_entity;
			}
		}
	}

	list_for_each_entry(entity, &rq->entities, list) {

		if (!kfifo_is_empty(&entity->job_queue)) {
			rq->current_entity = entity;
			spin_unlock(&rq->lock);
			return rq->current_entity;
		}

		if (entity == rq->current_entity)
			break;
	}

	spin_unlock(&rq->lock);

	return NULL;
}
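
/*
 * Illustrative note (added commentary, not part of the original file):
 * with entities A, B and C on the run queue and current_entity == B, the
 * search visits C first, then wraps around to A and finally B itself.
 * Entities with an empty job_queue are skipped, so a queue holding only
 * idle entities yields NULL.
 */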

/**
 * Init a context entity used by the scheduler when submitting jobs to a
 * HW ring.
 *
 * @sched The pointer to the scheduler
 * @entity The pointer to a valid amd_sched_entity
 * @rq The run queue to which this entity belongs
 * @jobs The max number of jobs in the job queue
 *
 * return 0 if succeeded. negative error code on failure
 */
int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
			  struct amd_sched_entity *entity,
			  struct amd_sched_rq *rq,
			  uint32_t jobs)
{
	if (!(sched && entity && rq))
		return -EINVAL;

	memset(entity, 0, sizeof(struct amd_sched_entity));
	entity->belongto_rq = rq;
	entity->scheduler = sched;
	init_waitqueue_head(&entity->wait_queue);
	entity->fence_context = fence_context_alloc(1);
	if (kfifo_alloc(&entity->job_queue,
			jobs * sizeof(void *),
			GFP_KERNEL))
		return -EINVAL;

	spin_lock_init(&entity->queue_lock);
	atomic_set(&entity->fence_seq, 0);

	/* Add the entity to the run queue */
	amd_sched_rq_add_entity(rq, entity);
	return 0;
}
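
/*
 * Usage sketch (illustrative only; the "ring" variable and the queue
 * depth of 32 are hypothetical caller-side choices, not from this file):
 *
 *	struct amd_sched_entity entity;
 *	int r;
 *
 *	r = amd_sched_entity_init(ring->scheduler, &entity,
 *				  &ring->scheduler->sched_rq, 32);
 *	if (r)
 *		return r;
 *
 * Work is then queued via amd_sched_entity_push_job(), and the entity is
 * torn down with amd_sched_entity_fini(ring->scheduler, &entity).
 */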

/**
 * Query if entity is initialized
 *
 * @sched Pointer to scheduler instance
 * @entity The pointer to a valid scheduler entity
 *
 * return true if entity is initialized, false otherwise
 */
static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched,
					    struct amd_sched_entity *entity)
{
	return entity->scheduler == sched &&
		entity->belongto_rq != NULL;
}

/**
 * Check if entity is idle
 *
 * @entity The pointer to a valid scheduler entity
 *
 * Return true if the entity doesn't have any unscheduled jobs.
 */
static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
{
	rmb();
	if (kfifo_is_empty(&entity->job_queue))
		return true;

	return false;
}

/**
 * Destroy a context entity
 *
 * @sched Pointer to scheduler instance
 * @entity The pointer to a valid scheduler entity
 *
 * return 0 if succeeded. negative error code on failure
 */
int amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
			  struct amd_sched_entity *entity)
{
	struct amd_sched_rq *rq = entity->belongto_rq;
	long r;

	if (!amd_sched_entity_is_initialized(sched, entity))
		return 0;

	/*
	 * The client will not queue more IBs during this fini, consume the
	 * existing queued IBs
	 */
	r = wait_event_timeout(entity->wait_queue,
			       amd_sched_entity_is_idle(entity),
			       msecs_to_jiffies(AMD_GPU_WAIT_IDLE_TIMEOUT_IN_MS));

	if (r <= 0)
		DRM_INFO("Entity %p is in waiting state during fini\n",
			 entity);

	amd_sched_rq_remove_entity(rq, entity);
	kfifo_free(&entity->job_queue);
	return r;
}
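
/*
 * Note (added observation): the wait above bounds teardown time; if the
 * timeout expires, any jobs still sitting in the FIFO are discarded by
 * kfifo_free() without ever being run.
 */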

/**
 * Helper to submit a job to the job queue
 *
 * @job The pointer to the job to submit
 *
 * Returns true if we could submit the job.
 */
static bool amd_sched_entity_in(struct amd_sched_job *job)
{
	struct amd_sched_entity *entity = job->s_entity;
	bool added, first = false;

	spin_lock(&entity->queue_lock);
	added = kfifo_in(&entity->job_queue, &job, sizeof(job)) == sizeof(job);

	if (added && kfifo_len(&entity->job_queue) == sizeof(job))
		first = true;

	spin_unlock(&entity->queue_lock);

	/* first job wakes up scheduler */
	if (first)
		wake_up_interruptible(&job->sched->wait_queue);

	return added;
}
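
/*
 * Design note (added commentary): kfifo_len() equals sizeof(job) right
 * after a successful kfifo_in() only when the queue went from empty to
 * one element, i.e. this was the first pending job. Only that transition
 * needs to wake the scheduler thread; later jobs are drained by the
 * already running main loop.
 */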

/**
 * Submit a job to the job queue
 *
 * @sched_job The pointer to the job to submit
 *
 * Returns 0 for success, negative error code otherwise.
 */
int amd_sched_entity_push_job(struct amd_sched_job *sched_job)
{
	struct amd_sched_entity *entity = sched_job->s_entity;
	struct amd_sched_fence *fence = amd_sched_fence_create(entity);
	int r;

	if (!fence)
		return -ENOMEM;

	fence_get(&fence->base);
	sched_job->s_fence = fence;

	r = wait_event_interruptible(entity->wait_queue,
				     amd_sched_entity_in(sched_job));

	return r;
}
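
/*
 * Usage sketch (illustrative; the job setup around the call is the
 * caller's responsibility, not code from this file):
 *
 *	sched_job->s_entity = &entity;
 *	r = amd_sched_entity_push_job(sched_job);
 *	if (r)
 *		return r;
 *
 * On -ERESTARTSYS nothing was queued; on success the caller can wait on
 * sched_job->s_fence->base for completion.
 */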

/**
 * Return true if we can push more jobs to the hw.
 */
static bool amd_sched_ready(struct amd_gpu_scheduler *sched)
{
	return atomic_read(&sched->hw_rq_count) <
		sched->hw_submission_limit;
}

/**
 * Select next entity containing real IB submissions
 */
static struct amd_sched_entity *
amd_sched_select_context(struct amd_gpu_scheduler *sched)
{
	struct amd_sched_entity *tmp;

	if (!amd_sched_ready(sched))
		return NULL;

	/* Kernel run queue has higher priority than normal run queue */
	tmp = amd_sched_rq_select_entity(&sched->kernel_rq);
	if (tmp == NULL)
		tmp = amd_sched_rq_select_entity(&sched->sched_rq);

	return tmp;
}

static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
{
	struct amd_sched_job *sched_job =
		container_of(cb, struct amd_sched_job, cb);
	struct amd_gpu_scheduler *sched;

	sched = sched_job->sched;
	amd_sched_fence_signal(sched_job->s_fence);
	atomic_dec(&sched->hw_rq_count);
	fence_put(&sched_job->s_fence->base);
	sched->ops->process_job(sched, sched_job);
	wake_up_interruptible(&sched->wait_queue);
}
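
/*
 * Completion path (added summary): when the hardware fence signals, this
 * callback signals the scheduler fence, releases one hw_rq_count slot so
 * amd_sched_ready() can admit another job, lets the backend reclaim the
 * job via process_job, and wakes the main loop to pick the next entity.
 */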

static int amd_sched_main(void *param)
{
	struct sched_param sparam = {.sched_priority = 1};
	struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;
	int r;

	sched_setscheduler(current, SCHED_FIFO, &sparam);

	while (!kthread_should_stop()) {
		struct amd_sched_entity *c_entity = NULL;
		struct amd_sched_job *job;
		struct fence *fence;

		wait_event_interruptible(sched->wait_queue,
					 kthread_should_stop() ||
					 (c_entity = amd_sched_select_context(sched)));

		if (!c_entity)
			continue;

		r = kfifo_out(&c_entity->job_queue, &job, sizeof(void *));
		if (r != sizeof(void *))
			continue;
		atomic_inc(&sched->hw_rq_count);

		fence = sched->ops->run_job(sched, c_entity, job);
		if (fence) {
			r = fence_add_callback(fence, &job->cb,
					       amd_sched_process_job);
			if (r == -ENOENT)
				amd_sched_process_job(fence, &job->cb);
			else if (r)
				DRM_ERROR("fence add callback failed (%d)\n", r);
			fence_put(fence);
		}

		wake_up(&c_entity->wait_queue);
	}
	return 0;
}

/**
 * Create a gpu scheduler
 *
 * @ops The backend operations for this scheduler.
 * @ring The ring id for the scheduler.
 * @hw_submission Number of hw submissions to do.
 *
 * Return the pointer to the scheduler on success, otherwise return NULL
 */
struct amd_gpu_scheduler *amd_sched_create(struct amd_sched_backend_ops *ops,
					   unsigned ring, unsigned hw_submission)
{
	struct amd_gpu_scheduler *sched;

	sched = kzalloc(sizeof(struct amd_gpu_scheduler), GFP_KERNEL);
	if (!sched)
		return NULL;

	sched->ops = ops;
	sched->ring_id = ring;
	sched->hw_submission_limit = hw_submission;
	snprintf(sched->name, sizeof(sched->name), "amdgpu[%d]", ring);
	amd_sched_rq_init(&sched->sched_rq);
	amd_sched_rq_init(&sched->kernel_rq);

	init_waitqueue_head(&sched->wait_queue);
	atomic_set(&sched->hw_rq_count, 0);
	/* Each scheduler will run on a separate kernel thread */
	sched->thread = kthread_run(amd_sched_main, sched, sched->name);
	if (IS_ERR(sched->thread)) {
		DRM_ERROR("Failed to create scheduler for id %d.\n", ring);
		kfree(sched);
		return NULL;
	}

	return sched;
}
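
/*
 * Usage sketch (illustrative; my_ops and ring_id are hypothetical
 * caller-provided values, with my_ops supplying the run_job and
 * process_job callbacks):
 *
 *	struct amd_gpu_scheduler *sched;
 *
 *	sched = amd_sched_create(&my_ops, ring_id, 2);
 *	if (!sched)
 *		return -ENOMEM;
 *
 * Jobs then flow in through amd_sched_entity_push_job(); tear down with
 * amd_sched_destroy(sched) once all entities are finished.
 */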

/**
 * Destroy a gpu scheduler
 *
 * @sched The pointer to the scheduler
 *
 * return 0 if succeeded, -1 if failed.
 */
int amd_sched_destroy(struct amd_gpu_scheduler *sched)
{
	kthread_stop(sched->thread);
	kfree(sched);
	return 0;
}