/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <drm/drmP.h>
#include "gpu_scheduler.h"

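/*
 * Overview (summarizing what this file implements):
 *
 * Each ring gets one amd_gpu_scheduler with its own kernel thread
 * (amd_sched_main). A scheduler owns two run queues, the kernel run queue
 * (higher priority) and the normal run queue, and selects entities from
 * them round robin. Every context entity carries a kfifo of pointer-sized
 * jobs; jobs handed to the hardware are tracked on active_hw_rq and counted
 * against hw_submission_limit, and amd_sched_process_job() retires them
 * from interrupt context.
 */
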
/* Initialize a given run queue struct */
static void init_rq(struct amd_run_queue *rq)
{
	INIT_LIST_HEAD(&rq->head.list);
	rq->head.belongto_rq = rq;
	mutex_init(&rq->lock);
	atomic_set(&rq->nr_entity, 0);
	rq->current_entity = &rq->head;
}

/* Note: the caller must hold the run queue lock or be in an atomic context */
static void rq_remove_entity(struct amd_run_queue *rq,
			     struct amd_sched_entity *entity)
{
	if (rq->current_entity == entity)
		rq->current_entity = list_entry(entity->list.prev,
						typeof(*entity), list);
	list_del_init(&entity->list);
	atomic_dec(&rq->nr_entity);
}

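/* Append an entity to the run queue; the caller must hold rq->lock */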
static void rq_add_entity(struct amd_run_queue *rq,
			  struct amd_sched_entity *entity)
{
	list_add_tail(&entity->list, &rq->head.list);
	atomic_inc(&rq->nr_entity);
}

/**
 * Select the next entity from a run queue using a round robin policy.
 * It may return the same entity as the current one if that is the only
 * available entity in the queue. Returns NULL if nothing is available.
 */
static struct amd_sched_entity *rq_select_entity(struct amd_run_queue *rq)
{
	struct amd_sched_entity *p = rq->current_entity;
	int i = atomic_read(&rq->nr_entity) + 1; /* real count + dummy head */

	while (i) {
		p = list_entry(p->list.next, typeof(*p), list);
		if (!rq->check_entity_status(p)) {
			rq->current_entity = p;
			break;
		}
		i--;
	}
	return i ? p : NULL;
}

static bool context_entity_is_waiting(struct amd_context_entity *entity)
{
	/* TODO: sync obj for multi-ring synchronization */
	return false;
}

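/*
 * Run queue status callback: returns 0 when the entity is runnable,
 * -1 for the dummy head, an empty job queue or an entity that is still
 * waiting on multi-ring synchronization.
 */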
static int gpu_entity_check_status(struct amd_sched_entity *entity)
{
	struct amd_context_entity *tmp = NULL;

	if (entity == &entity->belongto_rq->head)
		return -1;

	tmp = container_of(entity, typeof(*tmp), generic_entity);
	if (kfifo_is_empty(&tmp->job_queue) ||
	    context_entity_is_waiting(tmp))
		return -1;

	return 0;
}

/**
 * Note: This function should only be called from the scheduler main
 * thread for thread safety; there is no other protection here.
 * Return true if the scheduler is ready to run something, i.e. the
 * hardware queue is not yet full.
 *
 * For active_hw_rq there is only one producer (the scheduler thread) and
 * one consumer (the ISR), so it is safe to use this function in the
 * scheduler main thread to decide whether to continue emitting more IBs.
 */
static bool is_scheduler_ready(struct amd_gpu_scheduler *sched)
{
	unsigned long flags;
	bool full;

	spin_lock_irqsave(&sched->queue_lock, flags);
	full = atomic64_read(&sched->hw_rq_count) <
		sched->hw_submission_limit ? true : false;
	spin_unlock_irqrestore(&sched->queue_lock, flags);

	return full;
}

/**
 * Select the next entity from the kernel run queue; if none is
 * available, return NULL.
 */
static struct amd_context_entity *kernel_rq_select_context(
	struct amd_gpu_scheduler *sched)
{
	struct amd_sched_entity *sched_entity = NULL;
	struct amd_context_entity *tmp = NULL;
	struct amd_run_queue *rq = &sched->kernel_rq;

	mutex_lock(&rq->lock);
	sched_entity = rq_select_entity(rq);
	if (sched_entity)
		tmp = container_of(sched_entity,
				   typeof(*tmp),
				   generic_entity);
	mutex_unlock(&rq->lock);
	return tmp;
}

/**
 * Select the next entity that has real IB submissions queued.
 */
static struct amd_context_entity *select_context(
	struct amd_gpu_scheduler *sched)
{
	struct amd_context_entity *wake_entity = NULL;
	struct amd_context_entity *tmp;
	struct amd_run_queue *rq;

	if (!is_scheduler_ready(sched))
		return NULL;

	/* The kernel run queue has higher priority than the normal run queue */
	tmp = kernel_rq_select_context(sched);
	if (tmp != NULL)
		goto exit;

	WARN_ON(offsetof(struct amd_context_entity, generic_entity) != 0);

	rq = &sched->sched_rq;
	mutex_lock(&rq->lock);
	tmp = container_of(rq_select_entity(rq),
			   typeof(*tmp), generic_entity);
	mutex_unlock(&rq->lock);
exit:
	if (sched->current_entity && (sched->current_entity != tmp))
		wake_entity = sched->current_entity;
	sched->current_entity = tmp;
	if (wake_entity)
		wake_up(&wake_entity->wait_queue);
	return tmp;
}

/**
 * Initialize a context entity, used by the scheduler when submitting to a HW ring.
 *
 * @sched	The pointer to the scheduler
 * @entity	The pointer to a valid amd_context_entity
 * @rq		The run queue this entity belongs to
 * @jobs	The max number of jobs in the job queue
 *
 * return 0 on success, negative error code on failure
 */
int amd_context_entity_init(struct amd_gpu_scheduler *sched,
			    struct amd_context_entity *entity,
			    struct amd_run_queue *rq,
			    uint32_t jobs)
{
	uint64_t seq_ring = 0;

	if (!(sched && entity && rq))
		return -EINVAL;

	memset(entity, 0, sizeof(struct amd_context_entity));
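	/* Seed the virtual sequence counters with the ring id in the top bits */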
	seq_ring = ((uint64_t)sched->ring_id) << 60;
	spin_lock_init(&entity->lock);
	entity->generic_entity.belongto_rq = rq;
	entity->scheduler = sched;
	init_waitqueue_head(&entity->wait_queue);
	init_waitqueue_head(&entity->wait_emit);
	if (kfifo_alloc(&entity->job_queue,
			jobs * sizeof(void *),
			GFP_KERNEL))
		return -EINVAL;

	spin_lock_init(&entity->queue_lock);
	atomic64_set(&entity->last_emitted_v_seq, seq_ring);
	atomic64_set(&entity->last_queued_v_seq, seq_ring);

	/* Add the entity to the run queue */
	mutex_lock(&rq->lock);
	rq_add_entity(rq, &entity->generic_entity);
	mutex_unlock(&rq->lock);
	return 0;
}
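
/*
 * Rough usage sketch (illustration only, not taken from a real caller;
 * "my_job", "seq" and the queue depth of 32 are hypothetical):
 *
 *	struct amd_context_entity ce;
 *
 *	if (amd_context_entity_init(sched, &ce, &sched->sched_rq, 32))
 *		return -EINVAL;
 *
 *	amd_sched_push_job(sched, &ce, my_job);
 *
 *	The driver's run_job() callback calls amd_sched_emit(&ce, seq), so a
 *	caller that knows the job's virtual sequence number can block on it:
 *
 *	amd_sched_wait_emit(&ce, seq, false, -1);
 *
 *	amd_context_entity_fini(sched, &ce);
 */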

/**
 * Query if entity is initialized
 *
 * @sched	Pointer to scheduler instance
 * @entity	The pointer to a valid scheduler entity
 *
 * return true if entity is initialized, false otherwise
 */
static bool is_context_entity_initialized(struct amd_gpu_scheduler *sched,
					  struct amd_context_entity *entity)
{
	return entity->scheduler == sched &&
		entity->generic_entity.belongto_rq != NULL;
}

static bool is_context_entity_idle(struct amd_gpu_scheduler *sched,
				   struct amd_context_entity *entity)
{
	/*
	 * Idle means no pending IBs, and the entity is not
	 * currently being used.
	 */
	barrier();
	if ((sched->current_entity != entity) &&
	    kfifo_is_empty(&entity->job_queue))
		return true;

	return false;
}

/**
 * Destroy a context entity
 *
 * @sched	Pointer to scheduler instance
 * @entity	The pointer to a valid scheduler entity
 *
 * return 0 on success, negative error code on failure
 */
int amd_context_entity_fini(struct amd_gpu_scheduler *sched,
			    struct amd_context_entity *entity)
{
	int r = 0;
	struct amd_run_queue *rq = entity->generic_entity.belongto_rq;

	if (!is_context_entity_initialized(sched, entity))
		return 0;

	/*
	 * The client will not queue more IBs during this fini, so consume the
	 * existing queued IBs.
	 */
	r = wait_event_timeout(
		entity->wait_queue,
		is_context_entity_idle(sched, entity),
		msecs_to_jiffies(AMD_GPU_WAIT_IDLE_TIMEOUT_IN_MS)
		) ? 0 : -1;

	if (r) {
		if (entity->is_pending)
			DRM_INFO("Entity %p is in waiting state during fini, "
				 "all pending ibs will be canceled.\n",
				 entity);
	}

	mutex_lock(&rq->lock);
	rq_remove_entity(rq, &entity->generic_entity);
	mutex_unlock(&rq->lock);
	kfifo_free(&entity->job_queue);
	return r;
}

/**
 * Submit a normal job to the job queue
 *
 * @sched	The pointer to the scheduler
 * @c_entity	The pointer to the amd_context_entity
 * @job		The pointer to the job to submit
 *
 * return 0 on success. If the entity's job queue is full, the call blocks
 * until the scheduler has consumed enough queued commands to make room.
 */
int amd_sched_push_job(struct amd_gpu_scheduler *sched,
		       struct amd_context_entity *c_entity,
		       void *job)
{
	while (kfifo_in_spinlocked(&c_entity->job_queue, &job, sizeof(void *),
				   &c_entity->queue_lock) != sizeof(void *)) {
		/*
		 * The current context used up all its IB slots; wait here,
		 * or we may need to check whether the GPU is hung.
		 */
		schedule();
	}

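	/* Kick the scheduler thread; it may now have work to pick up. */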
	wake_up_interruptible(&sched->wait_queue);
	return 0;
}

/**
 * Wait for a virtual sequence number to be emitted.
 *
 * @c_entity	The pointer to a valid context entity
 * @seq		The virtual sequence number to wait for
 * @intr	Interruptible or not
 * @timeout	Timeout in ms, wait indefinitely if <0
 *
 * return 0 if signaled, <0 if the wait timed out
 */
int amd_sched_wait_emit(struct amd_context_entity *c_entity,
			uint64_t seq,
			bool intr,
			long timeout)
{
	atomic64_t *v_seq = &c_entity->last_emitted_v_seq;
	wait_queue_head_t *wait_queue = &c_entity->wait_emit;

	if (intr && (timeout < 0)) {
		wait_event_interruptible(
			*wait_queue,
			seq <= atomic64_read(v_seq));
		return 0;
	} else if (intr && (timeout >= 0)) {
		wait_event_interruptible_timeout(
			*wait_queue,
			seq <= atomic64_read(v_seq),
			msecs_to_jiffies(timeout));
		return (seq <= atomic64_read(v_seq)) ?
			0 : -1;
	} else if (!intr && (timeout < 0)) {
		wait_event(
			*wait_queue,
			seq <= atomic64_read(v_seq));
		return 0;
	} else if (!intr && (timeout >= 0)) {
		wait_event_timeout(
			*wait_queue,
			seq <= atomic64_read(v_seq),
			msecs_to_jiffies(timeout));
		return (seq <= atomic64_read(v_seq)) ?
			0 : -1;
	}
	return 0;
}

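/*
 * Scheduler thread main loop: runs as SCHED_FIFO, waits until the hardware
 * queue has room and an entity has work, pops one job from the entity's
 * kfifo, prepares it, tracks it on active_hw_rq and hands it to the
 * backend's run_job() callback.
 */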
static int amd_sched_main(void *param)
{
	int r;
	void *job;
	struct sched_param sparam = {.sched_priority = 1};
	struct amd_context_entity *c_entity = NULL;
	struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;

	sched_setscheduler(current, SCHED_FIFO, &sparam);

	while (!kthread_should_stop()) {
		struct amd_sched_job *sched_job = NULL;

		wait_event_interruptible(sched->wait_queue,
					 is_scheduler_ready(sched) &&
					 (c_entity = select_context(sched)));
		r = kfifo_out(&c_entity->job_queue, &job, sizeof(void *));
		if (r != sizeof(void *))
			continue;
		r = sched->ops->prepare_job(sched, c_entity, job);
		if (!r) {
			unsigned long flags;

			sched_job = kzalloc(sizeof(struct amd_sched_job),
					    GFP_KERNEL);
			if (!sched_job) {
				WARN(true, "No memory to allocate\n");
				continue;
			}
			sched_job->job = job;
			sched_job->sched = sched;
			spin_lock_irqsave(&sched->queue_lock, flags);
			list_add_tail(&sched_job->list, &sched->active_hw_rq);
			atomic64_inc(&sched->hw_rq_count);
			spin_unlock_irqrestore(&sched->queue_lock, flags);
		}
		mutex_lock(&sched->sched_lock);
		sched->ops->run_job(sched, c_entity, sched_job);
		mutex_unlock(&sched->sched_lock);
	}
	return 0;
}

/**
 * ISR to handle EOP interrupts
 *
 * @sched_job: the job that just completed on the hardware
 *
 */
void amd_sched_process_job(struct amd_sched_job *sched_job)
{
	unsigned long flags;
	struct amd_gpu_scheduler *sched;

	if (!sched_job)
		return;
	sched = sched_job->sched;
	spin_lock_irqsave(&sched->queue_lock, flags);
	list_del(&sched_job->list);
	atomic64_dec(&sched->hw_rq_count);
	spin_unlock_irqrestore(&sched->queue_lock, flags);

	sched->ops->process_job(sched, sched_job->job);
	kfree(sched_job);
	wake_up_interruptible(&sched->wait_queue);
}

/**
 * Create a gpu scheduler
 *
 * @device		The device context for this scheduler
 * @ops			The backend operations for this scheduler.
 * @ring		The scheduler is per ring; this is the ring id.
 * @granularity		The minimum ms unit at which the scheduler schedules.
 * @preemption		Indicates whether this ring supports preemption, 0 is no.
 * @hw_submission	The max number of jobs queued to the hardware at once.
 *
 * return the pointer to the scheduler on success, otherwise return NULL
 */
struct amd_gpu_scheduler *amd_sched_create(void *device,
					   struct amd_sched_backend_ops *ops,
					   unsigned ring,
					   unsigned granularity,
					   unsigned preemption,
					   unsigned hw_submission)
{
	struct amd_gpu_scheduler *sched;
	char name[20] = "gpu_sched[0]";

	sched = kzalloc(sizeof(struct amd_gpu_scheduler), GFP_KERNEL);
	if (!sched)
		return NULL;

	sched->device = device;
	sched->ops = ops;
	sched->granularity = granularity;
	sched->ring_id = ring;
	sched->preemption = preemption;
	sched->hw_submission_limit = hw_submission;
	snprintf(name, sizeof(name), "gpu_sched[%d]", ring);
	mutex_init(&sched->sched_lock);
	spin_lock_init(&sched->queue_lock);
	init_rq(&sched->sched_rq);
	sched->sched_rq.check_entity_status = gpu_entity_check_status;

	init_rq(&sched->kernel_rq);
	sched->kernel_rq.check_entity_status = gpu_entity_check_status;

	init_waitqueue_head(&sched->wait_queue);
	INIT_LIST_HEAD(&sched->active_hw_rq);
	atomic64_set(&sched->hw_rq_count, 0);
	/* Each scheduler runs on a separate kernel thread */
	sched->thread = kthread_create(amd_sched_main, sched, name);
	if (sched->thread) {
		wake_up_process(sched->thread);
		return sched;
	}

	DRM_ERROR("Failed to create scheduler for id %d.\n", ring);
	kfree(sched);
	return NULL;
}
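
/*
 * Creation sketch (illustration only; the device pointer, backend ops
 * variable, ring index and submission depth are hypothetical):
 *
 *	struct amd_gpu_scheduler *sched;
 *
 *	sched = amd_sched_create(adev, &my_backend_ops, ring_idx, 1, 0, 16);
 *	if (!sched)
 *		return -ENOMEM;
 *	...
 *	amd_sched_destroy(sched);
 */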

/**
 * Destroy a gpu scheduler
 *
 * @sched	The pointer to the scheduler
 *
 * return 0 on success
 */
int amd_sched_destroy(struct amd_gpu_scheduler *sched)
{
	kthread_stop(sched->thread);
	kfree(sched);
	return 0;
}

/**
 * Update the emitted sequence and wake up the waiters; called by run_job
 * on the driver side.
 *
 * @c_entity	The context entity
 * @seq		The sequence number of the latest emitted job
 */
void amd_sched_emit(struct amd_context_entity *c_entity, uint64_t seq)
{
	atomic64_set(&c_entity->last_emitted_v_seq, seq);
	wake_up_all(&c_entity->wait_emit);
}

/**
 * Get the next queued sequence number
 *
 * @c_entity	The context entity
 *
 * return the next queued sequence number
 */
uint64_t amd_sched_next_queued_seq(struct amd_context_entity *c_entity)
{
	return atomic64_read(&c_entity->last_queued_v_seq) + 1;
}