/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <drm/drmP.h>
#include "gpu_scheduler.h"

#define CREATE_TRACE_POINTS
#include "gpu_sched_trace.h"
static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity);
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
static void amd_sched_process_job(struct fence *f, struct fence_cb *cb);

struct kmem_cache *sched_fence_slab;
atomic_t sched_fence_slab_ref = ATOMIC_INIT(0);

/* Initialize a given run queue struct */
static void amd_sched_rq_init(struct amd_sched_rq *rq)
{
	spin_lock_init(&rq->lock);
	INIT_LIST_HEAD(&rq->entities);
	rq->current_entity = NULL;
}

static void amd_sched_rq_add_entity(struct amd_sched_rq *rq,
				    struct amd_sched_entity *entity)
{
	if (!list_empty(&entity->list))
		return;
	spin_lock(&rq->lock);
	list_add_tail(&entity->list, &rq->entities);
	spin_unlock(&rq->lock);
}

static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
				       struct amd_sched_entity *entity)
{
	if (list_empty(&entity->list))
		return;
	spin_lock(&rq->lock);
	list_del_init(&entity->list);
	if (rq->current_entity == entity)
		rq->current_entity = NULL;
	spin_unlock(&rq->lock);
}

/**
 * Select an entity that can provide a job to run
 *
 * @rq The run queue to check.
 *
 * Try to find a ready entity; returns NULL if none is found.
 */
static struct amd_sched_entity *
amd_sched_rq_select_entity(struct amd_sched_rq *rq)
{
	struct amd_sched_entity *entity;

	spin_lock(&rq->lock);

	entity = rq->current_entity;
	if (entity) {
		list_for_each_entry_continue(entity, &rq->entities, list) {
			if (amd_sched_entity_is_ready(entity)) {
				rq->current_entity = entity;
				spin_unlock(&rq->lock);
				return entity;
			}
		}
	}

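	/*
	 * Not found above: wrap around and scan from the head, up to and
	 * including current_entity.
	 */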
	list_for_each_entry(entity, &rq->entities, list) {
		if (amd_sched_entity_is_ready(entity)) {
			rq->current_entity = entity;
			spin_unlock(&rq->lock);
			return entity;
		}

		if (entity == rq->current_entity)
			break;
	}

	spin_unlock(&rq->lock);

	return NULL;
}

/**
 * Init a context entity used by the scheduler when submitting jobs to a HW ring.
 *
 * @sched The pointer to the scheduler
 * @entity The pointer to a valid amd_sched_entity
 * @rq The run queue this entity belongs to
 * @jobs The max number of jobs in the job queue
 *
 * Return 0 on success, negative error code on failure.
 */
int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
			  struct amd_sched_entity *entity,
			  struct amd_sched_rq *rq,
			  uint32_t jobs)
{
	int r;

	if (!(sched && entity && rq))
		return -EINVAL;

	memset(entity, 0, sizeof(struct amd_sched_entity));
	INIT_LIST_HEAD(&entity->list);
	entity->rq = rq;
	entity->sched = sched;

	spin_lock_init(&entity->queue_lock);
	r = kfifo_alloc(&entity->job_queue, jobs * sizeof(void *), GFP_KERNEL);
	if (r)
		return r;

	atomic_set(&entity->fence_seq, 0);
	entity->fence_context = fence_context_alloc(2);

	return 0;
}

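/*
 * Usage sketch (illustrative only, not part of this file): a driver would
 * typically embed one entity per context and bind it to one of the
 * scheduler's run queues. The ring pointer, run queue index and job count
 * below are assumptions for the example.
 *
 *	struct amd_sched_entity entity;
 *	int r;
 *
 *	r = amd_sched_entity_init(&ring->sched, &entity,
 *				  &ring->sched.sched_rq[0], 32);
 *	if (r)
 *		return r;
 *	...
 *	amd_sched_entity_fini(&ring->sched, &entity);
 */
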
/**
 * Query if entity is initialized
 *
 * @sched Pointer to scheduler instance
 * @entity The pointer to a valid scheduler entity
 *
 * Return true if the entity is initialized, false otherwise.
 */
static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched,
					    struct amd_sched_entity *entity)
{
	return entity->sched == sched &&
		entity->rq != NULL;
}

/**
 * Check if entity is idle
 *
 * @entity The pointer to a valid scheduler entity
 *
 * Return true if the entity doesn't have any unscheduled jobs.
 */
static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
{
	rmb();
	if (kfifo_is_empty(&entity->job_queue))
		return true;

	return false;
}

/**
 * Check if entity is ready
 *
 * @entity The pointer to a valid scheduler entity
 *
 * Return true if the entity can provide a job.
 */
static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity)
{
	if (kfifo_is_empty(&entity->job_queue))
		return false;

	if (ACCESS_ONCE(entity->dependency))
		return false;

	return true;
}

/**
 * Destroy a context entity
 *
 * @sched Pointer to scheduler instance
 * @entity The pointer to a valid scheduler entity
 *
 * Cleanup and free the allocated resources.
 */
void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
			   struct amd_sched_entity *entity)
{
	struct amd_sched_rq *rq = entity->rq;

	if (!amd_sched_entity_is_initialized(sched, entity))
		return;

	/*
	 * The client will not queue more IBs during this fini; consume the
	 * existing queued IBs.
	 */
	wait_event(sched->job_scheduled, amd_sched_entity_is_idle(entity));

	amd_sched_rq_remove_entity(rq, entity);
	kfifo_free(&entity->job_queue);
}

static void amd_sched_entity_wakeup(struct fence *f, struct fence_cb *cb)
{
	struct amd_sched_entity *entity =
		container_of(cb, struct amd_sched_entity, cb);
	entity->dependency = NULL;
	fence_put(f);
	amd_sched_wakeup(entity->sched);
}

static void amd_sched_entity_clear_dep(struct fence *f, struct fence_cb *cb)
{
	struct amd_sched_entity *entity =
		container_of(cb, struct amd_sched_entity, cb);
	entity->dependency = NULL;
	fence_put(f);
}

static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
{
	struct amd_gpu_scheduler *sched = entity->sched;
	struct fence *fence = entity->dependency;
	struct amd_sched_fence *s_fence;

	if (fence->context == entity->fence_context) {
		/* We can ignore fences from ourself */
		fence_put(entity->dependency);
		return false;
	}

	s_fence = to_amd_sched_fence(fence);
	if (s_fence && s_fence->sched == sched) {
		/*
		 * Fence is from the same scheduler, only need to wait for
		 * it to be scheduled
		 */
		fence = fence_get(&s_fence->scheduled);
		fence_put(entity->dependency);
		entity->dependency = fence;
		if (!fence_add_callback(fence, &entity->cb,
					amd_sched_entity_clear_dep))
			return true;

		/* Ignore it when it is already scheduled */
		fence_put(fence);
		return false;
	}

	if (!fence_add_callback(entity->dependency, &entity->cb,
				amd_sched_entity_wakeup))
		return true;

	fence_put(entity->dependency);
	return false;
}

static struct amd_sched_job *
amd_sched_entity_pop_job(struct amd_sched_entity *entity)
{
	struct amd_gpu_scheduler *sched = entity->sched;
	struct amd_sched_job *sched_job;

	if (!kfifo_out_peek(&entity->job_queue, &sched_job, sizeof(sched_job)))
		return NULL;

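	/*
	 * Walk the job's dependencies; as soon as one is still outstanding a
	 * callback is installed on it and we bail out until it signals.
	 */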
	while ((entity->dependency = sched->ops->dependency(sched_job)))
		if (amd_sched_entity_add_dependency_cb(entity))
			return NULL;

	return sched_job;
}

/**
 * Helper to submit a job to the job queue
 *
 * @sched_job The pointer to the job to submit
 *
 * Returns true if we could submit the job.
 */
static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
{
	struct amd_gpu_scheduler *sched = sched_job->sched;
	struct amd_sched_entity *entity = sched_job->s_entity;
	bool added, first = false;

	spin_lock(&entity->queue_lock);
	added = kfifo_in(&entity->job_queue, &sched_job,
			 sizeof(sched_job)) == sizeof(sched_job);

	if (added && kfifo_len(&entity->job_queue) == sizeof(sched_job))
		first = true;

	spin_unlock(&entity->queue_lock);

	/* first job wakes up scheduler */
	if (first) {
		/* Add the entity to the run queue */
		amd_sched_rq_add_entity(entity->rq, entity);
		amd_sched_wakeup(sched);
	}
	return added;
}

/*
 * amd_sched_job_finish is called via work item after the hw fence has
 * signaled; it removes the job from the ring_mirror_list and hands the
 * TDR timer over to the next job in flight.
 */
static void amd_sched_job_finish(struct work_struct *work)
{
	struct amd_sched_job *s_job = container_of(work, struct amd_sched_job,
						   finish_work);
	struct amd_gpu_scheduler *sched = s_job->sched;

	/* remove job from ring_mirror_list */
	spin_lock(&sched->job_list_lock);
	list_del_init(&s_job->node);
	if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
		struct amd_sched_job *next;

		spin_unlock(&sched->job_list_lock);
		cancel_delayed_work_sync(&s_job->work_tdr);
		spin_lock(&sched->job_list_lock);

		/* queue TDR for next job */
		next = list_first_entry_or_null(&sched->ring_mirror_list,
						struct amd_sched_job, node);

		if (next)
			schedule_delayed_work(&next->work_tdr, sched->timeout);
	}
	spin_unlock(&sched->job_list_lock);
	sched->ops->free_job(s_job);
}

static void amd_sched_job_finish_cb(struct fence *f, struct fence_cb *cb)
{
	struct amd_sched_job *job = container_of(cb, struct amd_sched_job,
						 finish_cb);
	schedule_work(&job->finish_work);
}

static void amd_sched_job_begin(struct amd_sched_job *s_job)
{
	struct amd_gpu_scheduler *sched = s_job->sched;

	spin_lock(&sched->job_list_lock);
	list_add_tail(&s_job->node, &sched->ring_mirror_list);
	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
	    list_first_entry_or_null(&sched->ring_mirror_list,
				     struct amd_sched_job, node) == s_job)
		schedule_delayed_work(&s_job->work_tdr, sched->timeout);
	spin_unlock(&sched->job_list_lock);
}

static void amd_sched_job_timedout(struct work_struct *work)
{
	struct amd_sched_job *job = container_of(work, struct amd_sched_job,
						 work_tdr.work);

	job->sched->ops->timedout_job(job);
}

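/*
 * Detach the scheduler callback from the hardware fence of every job still
 * in flight and drop the fence reference, e.g. before a GPU reset, so the
 * jobs can later be resubmitted by amd_sched_job_recovery().
 */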
void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched)
{
	struct amd_sched_job *s_job;

	spin_lock(&sched->job_list_lock);
	list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) {
		if (fence_remove_callback(s_job->s_fence->parent, &s_job->s_fence->cb)) {
			fence_put(s_job->s_fence->parent);
			s_job->s_fence->parent = NULL;
		}
	}
	atomic_set(&sched->hw_rq_count, 0);
	spin_unlock(&sched->job_list_lock);
}

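/*
 * Resubmit all jobs on the ring_mirror_list to the hardware, e.g. after a
 * GPU reset, and re-arm the TDR timer for the first of them.
 */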
void amd_sched_job_recovery(struct amd_gpu_scheduler *sched)
{
	struct amd_sched_job *s_job, *tmp;
	int r;

	spin_lock(&sched->job_list_lock);
	s_job = list_first_entry_or_null(&sched->ring_mirror_list,
					 struct amd_sched_job, node);
	if (s_job)
		schedule_delayed_work(&s_job->work_tdr, sched->timeout);

	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
		struct amd_sched_fence *s_fence = s_job->s_fence;
		struct fence *fence;

		spin_unlock(&sched->job_list_lock);
		fence = sched->ops->run_job(s_job);
		atomic_inc(&sched->hw_rq_count);
		if (fence) {
			s_fence->parent = fence_get(fence);
			r = fence_add_callback(fence, &s_fence->cb,
					       amd_sched_process_job);
			if (r == -ENOENT)
				amd_sched_process_job(fence, &s_fence->cb);
			else if (r)
				DRM_ERROR("fence add callback failed (%d)\n",
					  r);
			fence_put(fence);
		} else {
			DRM_ERROR("Failed to run job!\n");
			amd_sched_process_job(NULL, &s_fence->cb);
		}
		spin_lock(&sched->job_list_lock);
	}
	spin_unlock(&sched->job_list_lock);
}

/**
 * Submit a job to the job queue
 *
 * @sched_job The pointer to the job to submit
 *
 * Blocks until the job can be pushed to the entity's job queue.
 */
void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
{
	struct amd_sched_entity *entity = sched_job->s_entity;

	trace_amd_sched_job(sched_job);
	fence_add_callback(&sched_job->s_fence->finished, &sched_job->finish_cb,
			   amd_sched_job_finish_cb);
	wait_event(entity->sched->job_scheduled,
		   amd_sched_entity_in(sched_job));
}

/* init a sched_job with its basic fields */
int amd_sched_job_init(struct amd_sched_job *job,
		       struct amd_gpu_scheduler *sched,
		       struct amd_sched_entity *entity,
		       void *owner)
{
	job->sched = sched;
	job->s_entity = entity;
	job->s_fence = amd_sched_fence_create(entity, owner);
	if (!job->s_fence)
		return -ENOMEM;

	INIT_WORK(&job->finish_work, amd_sched_job_finish);
	INIT_LIST_HEAD(&job->node);
	INIT_DELAYED_WORK(&job->work_tdr, amd_sched_job_timedout);

	return 0;
}

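/*
 * Submission sketch (illustrative only): the expected flow is to allocate a
 * job, initialize it against an entity and then push it. The bare job
 * allocation and the owner pointer below are assumptions for the example;
 * drivers usually embed amd_sched_job in their own job struct.
 *
 *	struct amd_sched_job *job = kzalloc(sizeof(*job), GFP_KERNEL);
 *	int r;
 *
 *	if (!job)
 *		return -ENOMEM;
 *	r = amd_sched_job_init(job, &ring->sched, &entity, owner);
 *	if (r) {
 *		kfree(job);
 *		return r;
 *	}
 *	amd_sched_entity_push_job(job);	(blocks until queued)
 */
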
/**
 * Return true if we can push more jobs to the hw.
 */
static bool amd_sched_ready(struct amd_gpu_scheduler *sched)
{
	return atomic_read(&sched->hw_rq_count) <
		sched->hw_submission_limit;
}

/**
 * Wake up the scheduler when it is ready
 */
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched)
{
	if (amd_sched_ready(sched))
		wake_up_interruptible(&sched->wake_up_worker);
}

/**
 * Select next entity to process
 */
static struct amd_sched_entity *
amd_sched_select_entity(struct amd_gpu_scheduler *sched)
{
	struct amd_sched_entity *entity;
	int i;

	if (!amd_sched_ready(sched))
		return NULL;

	/* Kernel run queue has higher priority than normal run queue */
	for (i = 0; i < AMD_SCHED_MAX_PRIORITY; i++) {
		entity = amd_sched_rq_select_entity(&sched->sched_rq[i]);
		if (entity)
			break;
	}

	return entity;
}

static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
{
	struct amd_sched_fence *s_fence =
		container_of(cb, struct amd_sched_fence, cb);
	struct amd_gpu_scheduler *sched = s_fence->sched;

	atomic_dec(&sched->hw_rq_count);
	amd_sched_fence_finished(s_fence);

	trace_amd_sched_process_job(s_fence);
	fence_put(&s_fence->finished);
	wake_up_interruptible(&sched->wake_up_worker);
}

static bool amd_sched_blocked(struct amd_gpu_scheduler *sched)
{
	if (kthread_should_park()) {
		kthread_parkme();
		return true;
	}

	return false;
}

static int amd_sched_main(void *param)
{
	struct sched_param sparam = {.sched_priority = 1};
	struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;
	int r, count;

	sched_setscheduler(current, SCHED_FIFO, &sparam);

	while (!kthread_should_stop()) {
		struct amd_sched_entity *entity = NULL;
		struct amd_sched_fence *s_fence;
		struct amd_sched_job *sched_job;
		struct fence *fence;

		wait_event_interruptible(sched->wake_up_worker,
					 (!amd_sched_blocked(sched) &&
					  (entity = amd_sched_select_entity(sched))) ||
					 kthread_should_stop());

		if (!entity)
			continue;

		sched_job = amd_sched_entity_pop_job(entity);
		if (!sched_job)
			continue;

		s_fence = sched_job->s_fence;

		atomic_inc(&sched->hw_rq_count);
		amd_sched_job_begin(sched_job);

		fence = sched->ops->run_job(sched_job);
		amd_sched_fence_scheduled(s_fence);
		if (fence) {
			s_fence->parent = fence_get(fence);
			r = fence_add_callback(fence, &s_fence->cb,
					       amd_sched_process_job);
			if (r == -ENOENT)
				amd_sched_process_job(fence, &s_fence->cb);
			else if (r)
				DRM_ERROR("fence add callback failed (%d)\n",
					  r);
			fence_put(fence);
		} else {
			DRM_ERROR("Failed to run job!\n");
			amd_sched_process_job(NULL, &s_fence->cb);
		}

		count = kfifo_out(&entity->job_queue, &sched_job,
				  sizeof(sched_job));
		WARN_ON(count != sizeof(sched_job));
		wake_up(&sched->job_scheduled);
	}
	return 0;
}

/**
 * Init a gpu scheduler instance
 *
 * @sched The pointer to the scheduler
 * @ops The backend operations for this scheduler.
 * @hw_submission Number of hw submissions that can be in flight.
 * @timeout Timeout in jiffies for the TDR, or MAX_SCHEDULE_TIMEOUT to disable it.
 * @name Name used for debugging
 *
 * Return 0 on success, otherwise error code.
 */
int amd_sched_init(struct amd_gpu_scheduler *sched,
		   const struct amd_sched_backend_ops *ops,
		   unsigned hw_submission, long timeout, const char *name)
{
	int i;

	sched->ops = ops;
	sched->hw_submission_limit = hw_submission;
	sched->name = name;
	sched->timeout = timeout;
	for (i = 0; i < AMD_SCHED_MAX_PRIORITY; i++)
		amd_sched_rq_init(&sched->sched_rq[i]);

	init_waitqueue_head(&sched->wake_up_worker);
	init_waitqueue_head(&sched->job_scheduled);
	INIT_LIST_HEAD(&sched->ring_mirror_list);
	spin_lock_init(&sched->job_list_lock);
	atomic_set(&sched->hw_rq_count, 0);
	if (atomic_inc_return(&sched_fence_slab_ref) == 1) {
		sched_fence_slab = kmem_cache_create(
			"amd_sched_fence", sizeof(struct amd_sched_fence), 0,
			SLAB_HWCACHE_ALIGN, NULL);
		if (!sched_fence_slab)
			return -ENOMEM;
	}

	/* Each scheduler will run on a separate kernel thread */
	sched->thread = kthread_run(amd_sched_main, sched, sched->name);
	if (IS_ERR(sched->thread)) {
		DRM_ERROR("Failed to create scheduler for %s.\n", name);
		return PTR_ERR(sched->thread);
	}

	return 0;
}

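/*
 * Setup sketch (illustrative only): a driver fills in the backend ops and
 * initializes one scheduler per ring. The callback names, ring struct and
 * limits below are assumptions for the example.
 *
 *	static const struct amd_sched_backend_ops my_ops = {
 *		.dependency = my_dependency,
 *		.run_job = my_run_job,
 *		.timedout_job = my_timedout_job,
 *		.free_job = my_free_job,
 *	};
 *
 *	r = amd_sched_init(&ring->sched, &my_ops, 16,
 *			   msecs_to_jiffies(2000), ring->name);
 */
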
/**
 * Destroy a gpu scheduler
 *
 * @sched The pointer to the scheduler
 */
void amd_sched_fini(struct amd_gpu_scheduler *sched)
{
	if (sched->thread)
		kthread_stop(sched->thread);
	if (atomic_dec_and_test(&sched_fence_slab_ref))
		kmem_cache_destroy(sched_fence_slab);
}