/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <drm/drmP.h>
#include "gpu_scheduler.h"

#define CREATE_TRACE_POINTS
#include "gpu_sched_trace.h"

static struct amd_sched_job *
amd_sched_entity_pop_job(struct amd_sched_entity *entity);
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
/* Initialize a given run queue struct */
static void amd_sched_rq_init(struct amd_sched_rq *rq)
{
	spin_lock_init(&rq->lock);
	INIT_LIST_HEAD(&rq->entities);
	rq->current_entity = NULL;
}

static void amd_sched_rq_add_entity(struct amd_sched_rq *rq,
				    struct amd_sched_entity *entity)
{
	spin_lock(&rq->lock);
	list_add_tail(&entity->list, &rq->entities);
	spin_unlock(&rq->lock);
}

static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
				       struct amd_sched_entity *entity)
{
	spin_lock(&rq->lock);
	list_del_init(&entity->list);
	if (rq->current_entity == entity)
		rq->current_entity = NULL;
	spin_unlock(&rq->lock);
}
/**
 * Select the next job from a specified run queue with round robin policy.
 * Return NULL if nothing is available.
 */
static struct amd_sched_job *
amd_sched_rq_select_job(struct amd_sched_rq *rq)
{
	struct amd_sched_entity *entity;
	struct amd_sched_job *sched_job;

	spin_lock(&rq->lock);

	/* Continue after the entity that was picked last time */
	entity = rq->current_entity;
	if (entity) {
		list_for_each_entry_continue(entity, &rq->entities, list) {
			sched_job = amd_sched_entity_pop_job(entity);
			if (sched_job) {
				rq->current_entity = entity;
				spin_unlock(&rq->lock);
				return sched_job;
			}
		}
	}

	/* Wrap around, stopping once we reach the starting entity again */
	list_for_each_entry(entity, &rq->entities, list) {

		sched_job = amd_sched_entity_pop_job(entity);
		if (sched_job) {
			rq->current_entity = entity;
			spin_unlock(&rq->lock);
			return sched_job;
		}

		if (entity == rq->current_entity)
			break;
	}

	spin_unlock(&rq->lock);

	return NULL;
}

/**
 * Init a context entity, used by the scheduler when submitting to a HW ring.
 *
 * @sched The pointer to the scheduler
 * @entity The pointer to a valid amd_sched_entity
 * @rq The run queue this entity belongs to
 * @jobs The max number of jobs in the job queue
 *
 * Return 0 on success, a negative error code on failure.
 */
int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
			  struct amd_sched_entity *entity,
			  struct amd_sched_rq *rq,
			  uint32_t jobs)
{
	int r;

	if (!(sched && entity && rq))
		return -EINVAL;

	memset(entity, 0, sizeof(struct amd_sched_entity));
	INIT_LIST_HEAD(&entity->list);
	entity->rq = rq;
	entity->sched = sched;

	spin_lock_init(&entity->queue_lock);
	r = kfifo_alloc(&entity->job_queue, jobs * sizeof(void *), GFP_KERNEL);
	if (r)
		return r;

	atomic_set(&entity->fence_seq, 0);
	entity->fence_context = fence_context_alloc(1);

	/* Add the entity to the run queue */
	amd_sched_rq_add_entity(rq, entity);

	return 0;
}
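
/*
 * Illustrative usage sketch, not part of this file's API contract: a
 * driver would typically embed the entity in its own context object.
 * The queue depth of 32 below is only an example value.
 *
 *	struct amd_sched_entity entity;
 *	int r;
 *
 *	r = amd_sched_entity_init(sched, &entity, &sched->sched_rq, 32);
 *	if (r)
 *		return r;
 *	...
 *	amd_sched_entity_fini(sched, &entity);
 */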

/**
 * Query if entity is initialized
 *
 * @sched Pointer to scheduler instance
 * @entity The pointer to a valid scheduler entity
 *
 * return true if entity is initialized, false otherwise
 */
static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched,
					    struct amd_sched_entity *entity)
{
	return entity->sched == sched &&
		entity->rq != NULL;
}

/**
 * Check if entity is idle
 *
 * @entity The pointer to a valid scheduler entity
 *
 * Return true if the entity has no unscheduled jobs left.
 */
static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
{
	rmb();
	if (kfifo_is_empty(&entity->job_queue))
		return true;

	return false;
}

/**
 * Destroy a context entity
 *
 * @sched Pointer to scheduler instance
 * @entity The pointer to a valid scheduler entity
 *
 * Cleanup and free the allocated resources.
 */
void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
			   struct amd_sched_entity *entity)
{
	struct amd_sched_rq *rq = entity->rq;

	if (!amd_sched_entity_is_initialized(sched, entity))
		return;

	/*
	 * The client will not queue more IBs during this fini, consume
	 * existing queued IBs
	 */
	wait_event(sched->job_scheduled, amd_sched_entity_is_idle(entity));

	amd_sched_rq_remove_entity(rq, entity);
	kfifo_free(&entity->job_queue);
}

static void amd_sched_entity_wakeup(struct fence *f, struct fence_cb *cb)
{
	struct amd_sched_entity *entity =
		container_of(cb, struct amd_sched_entity, cb);
	entity->dependency = NULL;
	fence_put(f);
	amd_sched_wakeup(entity->sched);
}

static struct amd_sched_job *
amd_sched_entity_pop_job(struct amd_sched_entity *entity)
{
	struct amd_gpu_scheduler *sched = entity->sched;
	struct amd_sched_job *sched_job;

	if (ACCESS_ONCE(entity->dependency))
		return NULL;

	if (!kfifo_out_peek(&entity->job_queue, &sched_job, sizeof(sched_job)))
		return NULL;

	while ((entity->dependency = sched->ops->dependency(sched_job))) {

		/*
		 * If the callback can't be installed the fence has already
		 * signaled, so drop it and check for the next dependency.
		 */
		if (fence_add_callback(entity->dependency, &entity->cb,
				       amd_sched_entity_wakeup))
			fence_put(entity->dependency);
		else
			return NULL;
	}

	return sched_job;
}

/**
 * Helper to submit a job to the job queue
 *
 * @sched_job The pointer to the job to submit
 *
 * Returns true if we could submit the job.
 */
static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
{
	struct amd_sched_entity *entity = sched_job->s_entity;
	bool added, first = false;

	spin_lock(&entity->queue_lock);
	added = kfifo_in(&entity->job_queue, &sched_job,
			 sizeof(sched_job)) == sizeof(sched_job);

	if (added && kfifo_len(&entity->job_queue) == sizeof(sched_job))
		first = true;

	spin_unlock(&entity->queue_lock);

	/* first job wakes up scheduler */
	if (first)
		amd_sched_wakeup(sched_job->sched);

	return added;
}

/**
 * Submit a job to the job queue
 *
 * @sched_job The pointer to the job to submit
 *
 * Returns 0 for success, negative error code otherwise.
 */
int amd_sched_entity_push_job(struct amd_sched_job *sched_job)
{
	struct amd_sched_entity *entity = sched_job->s_entity;
	struct amd_sched_fence *fence = amd_sched_fence_create(
		entity, sched_job->owner);

	if (!fence)
		return -ENOMEM;

	fence_get(&fence->base);
	sched_job->s_fence = fence;

	wait_event(entity->sched->job_scheduled,
		   amd_sched_entity_in(sched_job));
	trace_amd_sched_job(sched_job);
	return 0;
}
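
/*
 * Sketch of the expected submission flow, assuming the driver has
 * already allocated and filled in a job (job allocation and the owner
 * value are driver specific):
 *
 *	sched_job->s_entity = entity;
 *	sched_job->owner = owner;
 *	r = amd_sched_entity_push_job(sched_job);
 *	if (r)
 *		goto err_free_job;
 *
 * The call blocks on sched->job_scheduled until the job fits into the
 * entity's job_queue, so it must not be made from atomic context.
 */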

/**
 * Return true if we can push more jobs to the hw.
 */
static bool amd_sched_ready(struct amd_gpu_scheduler *sched)
{
	return atomic_read(&sched->hw_rq_count) <
		sched->hw_submission_limit;
}

/**
 * Wake up the scheduler when it is ready
 */
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched)
{
	if (amd_sched_ready(sched))
		wake_up_interruptible(&sched->wake_up_worker);
}

/**
 * Select the next job to run, giving the kernel run queue priority.
 */
static struct amd_sched_job *
amd_sched_select_job(struct amd_gpu_scheduler *sched)
{
	struct amd_sched_job *sched_job;

	if (!amd_sched_ready(sched))
		return NULL;

	/* Kernel run queue has higher priority than normal run queue */
	sched_job = amd_sched_rq_select_job(&sched->kernel_rq);
	if (sched_job == NULL)
		sched_job = amd_sched_rq_select_job(&sched->sched_rq);

	return sched_job;
}

static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
{
	struct amd_sched_fence *s_fence =
		container_of(cb, struct amd_sched_fence, cb);
	struct amd_gpu_scheduler *sched = s_fence->sched;

	atomic_dec(&sched->hw_rq_count);
	amd_sched_fence_signal(s_fence);
	fence_put(&s_fence->base);
	wake_up_interruptible(&sched->wake_up_worker);
}

/**
 * The main scheduler thread: pick the next job, hand it to the hardware
 * and wait for completion callbacks.
 */
static int amd_sched_main(void *param)
{
	struct sched_param sparam = {.sched_priority = 1};
	struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;
	int r, count;

	sched_setscheduler(current, SCHED_FIFO, &sparam);

	while (!kthread_should_stop()) {
		struct amd_sched_entity *entity;
		struct amd_sched_fence *s_fence;
		struct amd_sched_job *sched_job;
		struct fence *fence;

		wait_event_interruptible(sched->wake_up_worker,
			kthread_should_stop() ||
			(sched_job = amd_sched_select_job(sched)));

		if (!sched_job)
			continue;

		entity = sched_job->s_entity;
		s_fence = sched_job->s_fence;
		atomic_inc(&sched->hw_rq_count);
		fence = sched->ops->run_job(sched_job);
		if (fence) {
			r = fence_add_callback(fence, &s_fence->cb,
					       amd_sched_process_job);
			if (r == -ENOENT)
				amd_sched_process_job(fence, &s_fence->cb);
			else if (r)
				DRM_ERROR("fence add callback failed (%d)\n", r);
			fence_put(fence);
		} else {
			DRM_ERROR("Failed to run job!\n");
			amd_sched_process_job(NULL, &s_fence->cb);
		}

		count = kfifo_out(&entity->job_queue, &sched_job,
				  sizeof(sched_job));
		WARN_ON(count != sizeof(sched_job));
		wake_up(&sched->job_scheduled);
	}
	return 0;
}

/**
 * Create a gpu scheduler
 *
 * @ops The backend operations for this scheduler.
 * @ring The ring id for the scheduler.
 * @hw_submission Maximum number of in-flight hw submissions.
 * @priv Driver private data, stored in sched->priv.
 *
 * Return the pointer to scheduler for success, otherwise return NULL
 */
struct amd_gpu_scheduler *amd_sched_create(struct amd_sched_backend_ops *ops,
					   unsigned ring, unsigned hw_submission,
					   void *priv)
{
	struct amd_gpu_scheduler *sched;

	sched = kzalloc(sizeof(struct amd_gpu_scheduler), GFP_KERNEL);
	if (!sched)
		return NULL;

	sched->ops = ops;
	sched->ring_id = ring;
	sched->hw_submission_limit = hw_submission;
	sched->priv = priv;
	snprintf(sched->name, sizeof(sched->name), "amdgpu[%d]", ring);
	amd_sched_rq_init(&sched->sched_rq);
	amd_sched_rq_init(&sched->kernel_rq);

	init_waitqueue_head(&sched->wake_up_worker);
	init_waitqueue_head(&sched->job_scheduled);
	atomic_set(&sched->hw_rq_count, 0);
	/* Each scheduler will run on a separate kernel thread */
	sched->thread = kthread_run(amd_sched_main, sched, sched->name);
	if (IS_ERR(sched->thread)) {
		DRM_ERROR("Failed to create scheduler for id %d.\n", ring);
		kfree(sched);
		return NULL;
	}

	return sched;
}
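
/*
 * Minimal usage sketch; my_ops, ring_idx and adev stand in for the
 * driver's backend ops, ring index and private data, and the
 * hw_submission value of 2 is only an example:
 *
 *	struct amd_gpu_scheduler *sched;
 *
 *	sched = amd_sched_create(&my_ops, ring_idx, 2, adev);
 *	if (!sched)
 *		return -ENOMEM;
 *	...
 *	amd_sched_destroy(sched);
 */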

/**
 * Destroy a gpu scheduler
 *
 * @sched The pointer to the scheduler
 *
 * Stops the scheduler thread and frees the scheduler; always returns 0.
 */
int amd_sched_destroy(struct amd_gpu_scheduler *sched)
{
	kthread_stop(sched->thread);
	kfree(sched);
	return 0;
}