/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _GPU_SCHEDULER_H_
#define _GPU_SCHEDULER_H_

#include <linux/kfifo.h>
#include <linux/fence.h>

#define AMD_GPU_WAIT_IDLE_TIMEOUT_IN_MS 3000

struct amd_gpu_scheduler;
struct amd_sched_rq;

/**
 * A scheduler entity is a wrapper around a job queue or a group
 * of other entities. Entities take turns emitting jobs from their
 * job queues to the corresponding hardware ring, based on the
 * scheduling policy.
 */
struct amd_sched_entity {
	struct list_head list;
	struct amd_sched_rq *belongto_rq;
	spinlock_t lock;
	/* the virtual_seq is unique per context per ring */
	atomic64_t last_queued_v_seq;
	atomic64_t last_signaled_v_seq;
	/* the job_queue maintains the jobs submitted by clients */
	struct kfifo job_queue;
	spinlock_t queue_lock;
	struct amd_gpu_scheduler *scheduler;
	wait_queue_head_t wait_queue;
	wait_queue_head_t wait_emit;
	bool is_pending;
	uint64_t fence_context;
	char name[20];
	bool need_wakeup;
};
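
/*
 * Illustrative sketch (hypothetical driver code, not part of this
 * header): a driver would typically embed an entity in its own per
 * context bookkeeping and recover it with container_of():
 *
 *	struct my_ctx_entity {
 *		struct amd_sched_entity base;
 *		...
 *	};
 *
 *	#define to_my_ctx_entity(e) \
 *		container_of(e, struct my_ctx_entity, base)
 */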

/**
 * The run queue is a set of entities scheduling command submissions
 * for one specific ring. It implements the scheduling policy that
 * selects the next entity to emit commands from.
 */
struct amd_sched_rq {
	struct mutex lock;
	struct list_head entities;
	struct amd_sched_entity *current_entity;
};
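
/*
 * A minimal sketch, assuming the policy simply picks the first entity
 * with queued jobs; the real selection logic lives in the scheduler
 * core and additionally uses current_entity to rotate fairly between
 * the entities (the function name below is hypothetical):
 *
 *	static struct amd_sched_entity *pick_next(struct amd_sched_rq *rq)
 *	{
 *		struct amd_sched_entity *entity;
 *
 *		mutex_lock(&rq->lock);
 *		list_for_each_entry(entity, &rq->entities, list) {
 *			if (!kfifo_is_empty(&entity->job_queue)) {
 *				rq->current_entity = entity;
 *				mutex_unlock(&rq->lock);
 *				return entity;
 *			}
 *		}
 *		mutex_unlock(&rq->lock);
 *		return NULL;
 *	}
 */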
70
Chunming Zhouf556cb0c2015-08-02 11:18:04 +080071struct amd_sched_fence {
72 struct fence base;
73 struct fence_cb cb;
Chunming Zhouf556cb0c2015-08-02 11:18:04 +080074 struct amd_sched_entity *entity;
75 uint64_t v_seq;
76 spinlock_t lock;
77};

struct amd_sched_job {
	struct list_head list;
	struct fence_cb cb;
	struct amd_gpu_scheduler *sched;
	struct amd_sched_entity *s_entity;
	void *data;
	struct amd_sched_fence *s_fence;
};

extern const struct fence_ops amd_sched_fence_ops;
static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f)
{
	struct amd_sched_fence *__f = container_of(f, struct amd_sched_fence, base);

	if (__f->base.ops == &amd_sched_fence_ops)
		return __f;

	return NULL;
}
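
/*
 * Example use of to_amd_sched_fence() (an illustrative sketch): given
 * an arbitrary fence, check whether it was emitted by the scheduler
 * before touching scheduler specific members:
 *
 *	struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
 *
 *	if (s_fence)
 *		seq = s_fence->v_seq;
 */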

/**
 * Define the backend operations called by the scheduler;
 * these functions should be implemented on the driver side.
 */
struct amd_sched_backend_ops {
	int (*prepare_job)(struct amd_gpu_scheduler *sched,
			   struct amd_sched_entity *c_entity,
			   struct amd_sched_job *job);
	struct fence *(*run_job)(struct amd_gpu_scheduler *sched,
				 struct amd_sched_entity *c_entity,
				 struct amd_sched_job *job);
	void (*process_job)(struct amd_gpu_scheduler *sched,
			    struct amd_sched_job *job);
};
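
/*
 * Illustrative sketch of a driver side implementation; every name
 * below is hypothetical, and prepare_job/process_job are elided:
 *
 *	static struct fence *my_run_job(struct amd_gpu_scheduler *sched,
 *					struct amd_sched_entity *c_entity,
 *					struct amd_sched_job *job)
 *	{
 *		...emit job->data to the hardware ring and return the
 *		   hardware fence of the submission...
 *	}
 *
 *	static struct amd_sched_backend_ops my_ops = {
 *		.prepare_job = my_prepare_job,
 *		.run_job = my_run_job,
 *		.process_job = my_process_job,
 *	};
 */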

/**
 * One scheduler is implemented for each hardware ring.
 */
struct amd_gpu_scheduler {
	void *device;
	struct task_struct *thread;
	struct amd_sched_rq sched_rq;
	struct amd_sched_rq kernel_rq;
	struct list_head active_hw_rq;
	atomic64_t hw_rq_count;
	struct amd_sched_backend_ops *ops;
	uint32_t ring_id;
	uint32_t granularity; /* in ms */
	uint32_t preemption;
	wait_queue_head_t wait_queue;
	struct amd_sched_entity *current_entity;
	struct mutex sched_lock;
	spinlock_t queue_lock;
	uint32_t hw_submission_limit;
};

struct amd_gpu_scheduler *amd_sched_create(void *device,
					   struct amd_sched_backend_ops *ops,
					   uint32_t ring,
					   uint32_t granularity,
					   uint32_t preemption,
					   uint32_t hw_submission);
int amd_sched_destroy(struct amd_gpu_scheduler *sched);

int amd_sched_push_job(struct amd_gpu_scheduler *sched,
		       struct amd_sched_entity *c_entity,
		       void *data,
		       struct amd_sched_fence **fence);

int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
			  struct amd_sched_entity *entity,
			  struct amd_sched_rq *rq,
			  uint32_t jobs);
int amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
			  struct amd_sched_entity *entity);
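
/*
 * Typical life cycle, as a minimal sketch; error handling is omitted,
 * and adev, my_ops, my_job_data and the numeric parameters are
 * hypothetical driver choices:
 *
 *	struct amd_gpu_scheduler *sched;
 *	struct amd_sched_entity entity;
 *	struct amd_sched_fence *fence;
 *
 *	sched = amd_sched_create(adev, &my_ops, ring_id, 1, 0, 16);
 *	amd_sched_entity_init(sched, &entity, &sched->sched_rq, 32);
 *
 *	amd_sched_push_job(sched, &entity, my_job_data, &fence);
 *	fence_wait(&fence->base, false);
 *	fence_put(&fence->base);
 *
 *	amd_sched_entity_fini(sched, &entity);
 *	amd_sched_destroy(sched);
 */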

uint64_t amd_sched_next_queued_seq(struct amd_sched_entity *c_entity);

struct amd_sched_fence *amd_sched_fence_create(
	struct amd_sched_entity *s_entity);
void amd_sched_fence_signal(struct amd_sched_fence *fence);


#endif