/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <drm/drmP.h>
#include "gpu_scheduler.h"

/* Initialize a given run queue struct */
static void init_rq(struct amd_run_queue *rq)
{
	INIT_LIST_HEAD(&rq->head.list);
	rq->head.belongto_rq = rq;
	mutex_init(&rq->lock);
	atomic_set(&rq->nr_entity, 0);
	rq->current_entity = &rq->head;
}

/* Note: the caller must hold the lock or be in an atomic context */
static void rq_remove_entity(struct amd_run_queue *rq,
			     struct amd_sched_entity *entity)
{
	if (rq->current_entity == entity)
		rq->current_entity = list_entry(entity->list.prev,
						typeof(*entity), list);
	list_del_init(&entity->list);
	atomic_dec(&rq->nr_entity);
}

static void rq_add_entity(struct amd_run_queue *rq,
			  struct amd_sched_entity *entity)
{
	list_add_tail(&entity->list, &rq->head.list);
	atomic_inc(&rq->nr_entity);
}

/**
 * Select the next entity from a given run queue using round-robin policy.
 * May return the same entity as the current one if that is the only
 * runnable entity in the queue. Returns NULL if nothing is available.
 */
static struct amd_sched_entity *rq_select_entity(struct amd_run_queue *rq)
{
	struct amd_sched_entity *p = rq->current_entity;
	int i = atomic_read(&rq->nr_entity) + 1; /* real count + dummy head */

	while (i) {
		p = list_entry(p->list.next, typeof(*p), list);
		if (!rq->check_entity_status(p)) {
			rq->current_entity = p;
			break;
		}
		i--;
	}
	return i ? p : NULL;
}

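/*
 * Illustrative note on rq_select_entity(): the run queue is a circular
 * list threaded through the dummy head, e.g.
 *
 *	head -> entity0 -> entity1 -> entity2 -> head -> ...
 *
 * Starting from current_entity, the walk visits at most nr_entity + 1
 * nodes, so it terminates even when every node, including the dummy
 * head, fails the status check.
 */
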
static bool context_entity_is_waiting(struct amd_sched_entity *entity)
{
	/* TODO: sync obj for multi-ring synchronization */
	return false;
}

static int gpu_entity_check_status(struct amd_sched_entity *entity)
{
	if (entity == &entity->belongto_rq->head)
		return -1;

	if (kfifo_is_empty(&entity->job_queue) ||
	    context_entity_is_waiting(entity))
		return -1;

	return 0;
}

/**
 * Note: This function should only be called from the scheduler main
 * thread for thread safety; there is no other protection here.
 * Return true if the scheduler has room to submit more jobs to the
 * hardware.
 *
 * For active_hw_rq there is only one producer (the scheduler thread) and
 * one consumer (the ISR), so it is safe to use this function in the
 * scheduler main thread to decide whether to continue emitting more IBs.
 */
static bool is_scheduler_ready(struct amd_gpu_scheduler *sched)
{
	unsigned long flags;
	bool ready;

	spin_lock_irqsave(&sched->queue_lock, flags);
	ready = atomic64_read(&sched->hw_rq_count) < sched->hw_submission_limit;
	spin_unlock_irqrestore(&sched->queue_lock, flags);

	return ready;
}

/**
 * Select the next entity from the kernel run queue; return NULL if
 * nothing is available.
 */
static struct amd_sched_entity *
kernel_rq_select_context(struct amd_gpu_scheduler *sched)
{
	struct amd_sched_entity *sched_entity;
	struct amd_run_queue *rq = &sched->kernel_rq;

	mutex_lock(&rq->lock);
	sched_entity = rq_select_entity(rq);
	mutex_unlock(&rq->lock);
	return sched_entity;
}

/**
 * Select the next entity containing real IB submissions.
 */
static struct amd_sched_entity *
select_context(struct amd_gpu_scheduler *sched)
{
	struct amd_sched_entity *wake_entity = NULL;
	struct amd_sched_entity *tmp;
	struct amd_run_queue *rq;

	if (!is_scheduler_ready(sched))
		return NULL;

	/* The kernel run queue has higher priority than the normal run queue */
	tmp = kernel_rq_select_context(sched);
	if (tmp != NULL)
		goto exit;

	rq = &sched->sched_rq;
	mutex_lock(&rq->lock);
	tmp = rq_select_entity(rq);
	mutex_unlock(&rq->lock);
exit:
	if (sched->current_entity && (sched->current_entity != tmp))
		wake_entity = sched->current_entity;
	sched->current_entity = tmp;
	if (wake_entity)
		wake_up(&wake_entity->wait_queue);
	return tmp;
}

/**
 * Init a context entity used by the scheduler when submitting to the HW ring.
 *
 * @sched The pointer to the scheduler
 * @entity The pointer to a valid amd_sched_entity
 * @rq The run queue this entity belongs to
 * @jobs The max number of jobs in the job queue
 *
 * return 0 if succeeded, a negative error code on failure
 */
int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
			  struct amd_sched_entity *entity,
			  struct amd_run_queue *rq,
			  uint32_t jobs)
{
	uint64_t seq_ring = 0;

	if (!(sched && entity && rq))
		return -EINVAL;

	memset(entity, 0, sizeof(struct amd_sched_entity));
	seq_ring = ((uint64_t)sched->ring_id) << 60;
	spin_lock_init(&entity->lock);
	entity->belongto_rq = rq;
	entity->scheduler = sched;
	init_waitqueue_head(&entity->wait_queue);
	init_waitqueue_head(&entity->wait_emit);
	if (kfifo_alloc(&entity->job_queue,
			jobs * sizeof(void *),
			GFP_KERNEL))
		return -ENOMEM;

	spin_lock_init(&entity->queue_lock);
	atomic64_set(&entity->last_emitted_v_seq, seq_ring);
	atomic64_set(&entity->last_queued_v_seq, seq_ring);

	/* Add the entity to the run queue */
	mutex_lock(&rq->lock);
	rq_add_entity(rq, entity);
	mutex_unlock(&rq->lock);
	return 0;
}

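/*
 * Example (an illustrative sketch): how a driver might set up an entity
 * on the normal run queue. "my_sched" is assumed to come from a prior
 * amd_sched_create() call, and the queue depth of 32 jobs is an
 * arbitrary choice for this sketch.
 *
 *	struct amd_sched_entity entity;
 *	int r;
 *
 *	r = amd_sched_entity_init(my_sched, &entity, &my_sched->sched_rq, 32);
 *	if (r)
 *		return r;
 */
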
/**
 * Query if an entity is initialized
 *
 * @sched Pointer to the scheduler instance
 * @entity The pointer to a valid scheduler entity
 *
 * return true if the entity is initialized, false otherwise
 */
static bool is_context_entity_initialized(struct amd_gpu_scheduler *sched,
					  struct amd_sched_entity *entity)
{
	return entity->scheduler == sched &&
		entity->belongto_rq != NULL;
}

static bool is_context_entity_idle(struct amd_gpu_scheduler *sched,
				   struct amd_sched_entity *entity)
{
	/*
	 * Idle means no pending IBs, and the entity is not
	 * currently being used.
	 */
	barrier();
	if ((sched->current_entity != entity) &&
	    kfifo_is_empty(&entity->job_queue))
		return true;

	return false;
}

/**
 * Destroy a context entity
 *
 * @sched Pointer to the scheduler instance
 * @entity The pointer to a valid scheduler entity
 *
 * return 0 if succeeded, -1 if the entity did not become idle in time
 */
int amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
			  struct amd_sched_entity *entity)
{
	int r = 0;
	struct amd_run_queue *rq = entity->belongto_rq;

	if (!is_context_entity_initialized(sched, entity))
		return 0;

	/*
	 * The client will not queue more IBs during this fini, so consume the
	 * existing queued IBs.
	 */
	r = wait_event_timeout(
		entity->wait_queue,
		is_context_entity_idle(sched, entity),
		msecs_to_jiffies(AMD_GPU_WAIT_IDLE_TIMEOUT_IN_MS)
		) ? 0 : -1;

	if (r) {
		if (entity->is_pending)
			DRM_INFO("Entity %p is in waiting state during fini, all pending IBs will be canceled.\n",
				 entity);
	}

	mutex_lock(&rq->lock);
	rq_remove_entity(rq, entity);
	mutex_unlock(&rq->lock);
	kfifo_free(&entity->job_queue);
	return r;
}

/**
 * Submit a normal job to the job queue
 *
 * @sched The pointer to the scheduler
 * @c_entity The pointer to the amd_sched_entity
 * @data The pointer to the job data to submit
 *
 * return 0 if succeeded, -ENOMEM if the job allocation failed. If the
 * entity's job queue is full, this call blocks until the scheduler has
 * consumed some of the queued commands.
 */
int amd_sched_push_job(struct amd_gpu_scheduler *sched,
		       struct amd_sched_entity *c_entity,
		       void *data)
{
	struct amd_sched_job *job = kzalloc(sizeof(struct amd_sched_job),
					    GFP_KERNEL);
	if (!job)
		return -ENOMEM;
	job->sched = sched;
	job->s_entity = c_entity;
	job->data = data;
	while (kfifo_in_spinlocked(&c_entity->job_queue, &job, sizeof(void *),
				   &c_entity->queue_lock) != sizeof(void *)) {
		/*
		 * The current context used up all its IB slots;
		 * wait here, or we need to check whether the GPU is hung.
		 */
		schedule();
	}

	wake_up_interruptible(&sched->wait_queue);
	return 0;
}

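/*
 * Example (an illustrative sketch; the exact sequence bookkeeping is up
 * to the driver): one plausible submission path. The data pointer is
 * opaque to the scheduler and is only interpreted by the backend's
 * prepare_job() and run_job() callbacks; "my_ib" stands in for whatever
 * per-submission structure the driver uses.
 *
 *	uint64_t seq = amd_sched_next_queued_seq(&entity);
 *
 *	atomic64_set(&entity.last_queued_v_seq, seq);
 *	r = amd_sched_push_job(my_sched, &entity, my_ib);
 *	if (r)
 *		return r;
 */
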
/**
 * Wait for a virtual sequence number to be emitted.
 *
 * @c_entity The pointer to a valid context entity
 * @seq The virtual sequence number to wait for
 * @intr Interruptible or not
 * @timeout Timeout in ms, wait infinitely if <0
 *
 * return 0 if signaled, <0 on failure or timeout
 */
int amd_sched_wait_emit(struct amd_sched_entity *c_entity,
			uint64_t seq,
			bool intr,
			long timeout)
{
	atomic64_t *v_seq = &c_entity->last_emitted_v_seq;
	wait_queue_head_t *wait_queue = &c_entity->wait_emit;

	if (intr && (timeout < 0)) {
		wait_event_interruptible(
			*wait_queue,
			seq <= atomic64_read(v_seq));
		return 0;
	} else if (intr && (timeout >= 0)) {
		wait_event_interruptible_timeout(
			*wait_queue,
			seq <= atomic64_read(v_seq),
			msecs_to_jiffies(timeout));
		return (seq <= atomic64_read(v_seq)) ?
			0 : -1;
	} else if (!intr && (timeout < 0)) {
		wait_event(
			*wait_queue,
			seq <= atomic64_read(v_seq));
		return 0;
	} else if (!intr && (timeout >= 0)) {
		wait_event_timeout(
			*wait_queue,
			seq <= atomic64_read(v_seq),
			msecs_to_jiffies(timeout));
		return (seq <= atomic64_read(v_seq)) ?
			0 : -1;
	}
	return 0;
}

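/*
 * Example (an illustrative sketch): block uninterruptibly for up to
 * 500 ms until the job with virtual sequence "seq" has been emitted to
 * the ring.
 *
 *	if (amd_sched_wait_emit(&entity, seq, false, 500))
 *		DRM_ERROR("timed out waiting for seq %llu\n", seq);
 */
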
static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
{
	struct amd_sched_job *sched_job =
		container_of(cb, struct amd_sched_job, cb);
	struct amd_gpu_scheduler *sched;
	unsigned long flags;

	sched = sched_job->sched;
	spin_lock_irqsave(&sched->queue_lock, flags);
	list_del(&sched_job->list);
	atomic64_dec(&sched->hw_rq_count);
	spin_unlock_irqrestore(&sched->queue_lock, flags);

	sched->ops->process_job(sched, sched_job);
	kfree(sched_job);
	wake_up_interruptible(&sched->wait_queue);
}

static int amd_sched_main(void *param)
{
	int r;
	struct amd_sched_job *job;
	struct sched_param sparam = {.sched_priority = 1};
	struct amd_sched_entity *c_entity = NULL;
	struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;

	sched_setscheduler(current, SCHED_FIFO, &sparam);

	while (!kthread_should_stop()) {
		struct fence *fence;

		wait_event_interruptible(sched->wait_queue,
					 is_scheduler_ready(sched) &&
					 (c_entity = select_context(sched)));
		r = kfifo_out(&c_entity->job_queue, &job, sizeof(void *));
		if (r != sizeof(void *))
			continue;
		r = sched->ops->prepare_job(sched, c_entity, job);
		if (!r) {
			unsigned long flags;

			spin_lock_irqsave(&sched->queue_lock, flags);
			list_add_tail(&job->list, &sched->active_hw_rq);
			atomic64_inc(&sched->hw_rq_count);
			spin_unlock_irqrestore(&sched->queue_lock, flags);
		}
		mutex_lock(&sched->sched_lock);
		fence = sched->ops->run_job(sched, c_entity, job);
		if (fence) {
			r = fence_add_callback(fence, &job->cb,
					       amd_sched_process_job);
			if (r == -ENOENT)
				amd_sched_process_job(fence, &job->cb);
			else if (r)
				DRM_ERROR("fence add callback failed (%d)\n", r);
			fence_put(fence);
		}
		mutex_unlock(&sched->sched_lock);
	}
	return 0;
}

/**
 * Create a gpu scheduler
 *
 * @device The device context for this scheduler
 * @ops The backend operations for this scheduler.
 * @ring The scheduler is per ring; this is the ring id.
 * @granularity The minimum ms unit the scheduler will schedule.
 * @preemption Indicates whether this ring supports preemption, 0 means no.
 * @hw_submission The max number of jobs in flight on the hardware
 *
 * return the pointer to the scheduler on success, otherwise NULL
 */
struct amd_gpu_scheduler *amd_sched_create(void *device,
					   struct amd_sched_backend_ops *ops,
					   unsigned ring,
					   unsigned granularity,
					   unsigned preemption,
					   unsigned hw_submission)
{
	struct amd_gpu_scheduler *sched;
	char name[20];

	sched = kzalloc(sizeof(struct amd_gpu_scheduler), GFP_KERNEL);
	if (!sched)
		return NULL;

	sched->device = device;
	sched->ops = ops;
	sched->granularity = granularity;
	sched->ring_id = ring;
	sched->preemption = preemption;
	sched->hw_submission_limit = hw_submission;
	snprintf(name, sizeof(name), "gpu_sched[%d]", ring);
	mutex_init(&sched->sched_lock);
	spin_lock_init(&sched->queue_lock);
	init_rq(&sched->sched_rq);
	sched->sched_rq.check_entity_status = gpu_entity_check_status;

	init_rq(&sched->kernel_rq);
	sched->kernel_rq.check_entity_status = gpu_entity_check_status;

	init_waitqueue_head(&sched->wait_queue);
	INIT_LIST_HEAD(&sched->active_hw_rq);
	atomic64_set(&sched->hw_rq_count, 0);
	/* Each scheduler will run on a separate kernel thread */
	sched->thread = kthread_create(amd_sched_main, sched, name);
	if (!IS_ERR(sched->thread)) {
		wake_up_process(sched->thread);
		return sched;
	}

	DRM_ERROR("Failed to create scheduler for id %d.\n", ring);
	kfree(sched);
	return NULL;
}

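/*
 * Example (an illustrative sketch): creating one scheduler per hardware
 * ring. The backend ops table is driver-defined; "my_prepare_job",
 * "my_run_job", "my_process_job" and the submission depth of 16 are
 * assumptions of this sketch, not values mandated by the scheduler.
 *
 *	static struct amd_sched_backend_ops my_ops = {
 *		.prepare_job = my_prepare_job,
 *		.run_job     = my_run_job,
 *		.process_job = my_process_job,
 *	};
 *
 *	sched = amd_sched_create(adev, &my_ops, ring_id, 1, 0, 16);
 *	if (!sched)
 *		return -ENOMEM;
 */
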
/**
 * Destroy a gpu scheduler
 *
 * @sched The pointer to the scheduler
 *
 * return 0 if succeeded
 */
int amd_sched_destroy(struct amd_gpu_scheduler *sched)
{
	kthread_stop(sched->thread);
	kfree(sched);
	return 0;
}

/**
 * Update the emitted sequence and wake up the waiters; called by run_job
 * on the driver side.
 *
 * @c_entity The context entity
 * @seq The sequence number for the latest emitted job
 */
void amd_sched_emit(struct amd_sched_entity *c_entity, uint64_t seq)
{
	atomic64_set(&c_entity->last_emitted_v_seq, seq);
	wake_up_all(&c_entity->wait_emit);
}

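/*
 * Example (an illustrative sketch): a backend run_job() callback would
 * push the IB to the hardware ring and then report the emitted sequence
 * so that amd_sched_wait_emit() waiters can make progress. "my_hw_submit"
 * and "my_job_seq" are hypothetical driver helpers.
 *
 *	static struct fence *my_run_job(struct amd_gpu_scheduler *sched,
 *					struct amd_sched_entity *entity,
 *					struct amd_sched_job *job)
 *	{
 *		struct fence *fence = my_hw_submit(job->data);
 *
 *		amd_sched_emit(entity, my_job_seq(job->data));
 *		return fence;
 *	}
 */
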
/**
 * Get the next queued sequence number
 *
 * @c_entity The context entity
 *
 * return the next queued sequence number
 */
uint64_t amd_sched_next_queued_seq(struct amd_sched_entity *c_entity)
{
	return atomic64_read(&c_entity->last_queued_v_seq) + 1;
}