/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <drm/drmP.h>
#include "gpu_scheduler.h"

#define CREATE_TRACE_POINTS
#include "gpu_sched_trace.h"

static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity);
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
static void amd_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);

/* Initialize a given run queue struct */
static void amd_sched_rq_init(struct amd_sched_rq *rq)
{
	spin_lock_init(&rq->lock);
	INIT_LIST_HEAD(&rq->entities);
	rq->current_entity = NULL;
}

static void amd_sched_rq_add_entity(struct amd_sched_rq *rq,
				    struct amd_sched_entity *entity)
{
	if (!list_empty(&entity->list))
		return;
	spin_lock(&rq->lock);
	list_add_tail(&entity->list, &rq->entities);
	spin_unlock(&rq->lock);
}

static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
				       struct amd_sched_entity *entity)
{
	if (list_empty(&entity->list))
		return;
	spin_lock(&rq->lock);
	list_del_init(&entity->list);
	if (rq->current_entity == entity)
		rq->current_entity = NULL;
	spin_unlock(&rq->lock);
}

/**
 * Select an entity which could provide a job to run
 *
 * @rq The run queue to check.
 *
 * Try to find a ready entity, returns NULL if none found.
 */
static struct amd_sched_entity *
amd_sched_rq_select_entity(struct amd_sched_rq *rq)
{
	struct amd_sched_entity *entity;

	spin_lock(&rq->lock);

	entity = rq->current_entity;
	if (entity) {
		list_for_each_entry_continue(entity, &rq->entities, list) {
			if (amd_sched_entity_is_ready(entity)) {
				rq->current_entity = entity;
				spin_unlock(&rq->lock);
				return entity;
			}
		}
	}

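	/*
	 * We scanned from current_entity to the tail without finding a ready
	 * entity; wrap around and scan from the head, stopping once we reach
	 * current_entity again so every entity is considered exactly once.
	 */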
	list_for_each_entry(entity, &rq->entities, list) {

		if (amd_sched_entity_is_ready(entity)) {
			rq->current_entity = entity;
			spin_unlock(&rq->lock);
			return entity;
		}

		if (entity == rq->current_entity)
			break;
	}

	spin_unlock(&rq->lock);

	return NULL;
}

/**
 * Init a context entity used by the scheduler when submitting to a HW ring.
 *
 * @sched The pointer to the scheduler
 * @entity The pointer to a valid amd_sched_entity
 * @rq The run queue this entity belongs to
 * @jobs The max number of jobs in the job queue
 *
 * return 0 if succeeded, negative error code on failure
 */
int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
			  struct amd_sched_entity *entity,
			  struct amd_sched_rq *rq,
			  uint32_t jobs)
{
	int r;

	if (!(sched && entity && rq))
		return -EINVAL;

	memset(entity, 0, sizeof(struct amd_sched_entity));
	INIT_LIST_HEAD(&entity->list);
	entity->rq = rq;
	entity->sched = sched;

	spin_lock_init(&entity->queue_lock);
	r = kfifo_alloc(&entity->job_queue, jobs * sizeof(void *), GFP_KERNEL);
	if (r)
		return r;

	atomic_set(&entity->fence_seq, 0);
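	/* two contexts: one for the scheduled fences, one for the finished
	 * fences of this entity's jobs */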
	entity->fence_context = dma_fence_context_alloc(2);

	return 0;
}

/**
 * Query if entity is initialized
 *
 * @sched Pointer to scheduler instance
 * @entity The pointer to a valid scheduler entity
 *
 * return true if entity is initialized, false otherwise
 */
static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched,
					    struct amd_sched_entity *entity)
{
	return entity->sched == sched &&
		entity->rq != NULL;
}

/**
 * Check if entity is idle
 *
 * @entity The pointer to a valid scheduler entity
 *
 * Return true if the entity doesn't have any unscheduled jobs.
 */
static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
{
	rmb();
	if (kfifo_is_empty(&entity->job_queue))
		return true;

	return false;
}

/**
 * Check if entity is ready
 *
 * @entity The pointer to a valid scheduler entity
 *
 * Return true if entity could provide a job.
 */
static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity)
{
	if (kfifo_is_empty(&entity->job_queue))
		return false;

	if (ACCESS_ONCE(entity->dependency))
		return false;

	return true;
}

/**
 * Destroy a context entity
 *
 * @sched Pointer to scheduler instance
 * @entity The pointer to a valid scheduler entity
 *
 * Cleanup and free the allocated resources.
 */
void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
			   struct amd_sched_entity *entity)
{
	struct amd_sched_rq *rq = entity->rq;

	if (!amd_sched_entity_is_initialized(sched, entity))
		return;

	/*
	 * The client will not queue more IBs during this fini; consume the
	 * existing queued IBs.
	 */
	wait_event(sched->job_scheduled, amd_sched_entity_is_idle(entity));

	amd_sched_rq_remove_entity(rq, entity);
	kfifo_free(&entity->job_queue);
}

static void amd_sched_entity_wakeup(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct amd_sched_entity *entity =
		container_of(cb, struct amd_sched_entity, cb);
	entity->dependency = NULL;
	dma_fence_put(f);
	amd_sched_wakeup(entity->sched);
}

static void amd_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct amd_sched_entity *entity =
		container_of(cb, struct amd_sched_entity, cb);
	entity->dependency = NULL;
	dma_fence_put(f);
}
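/*
 * Check whether a dependency on @fence could be optimized away: a fence from
 * this entity's own fence context, or a scheduler fence from the same
 * scheduler, does not need a full hardware wait.
 */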
bool amd_sched_dependency_optimized(struct dma_fence *fence,
				    struct amd_sched_entity *entity)
{
	struct amd_gpu_scheduler *sched = entity->sched;
	struct amd_sched_fence *s_fence;

	if (!fence || dma_fence_is_signaled(fence))
		return false;
	if (fence->context == entity->fence_context)
		return true;
	s_fence = to_amd_sched_fence(fence);
	if (s_fence && s_fence->sched == sched)
		return true;

	return false;
}

static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
{
	struct amd_gpu_scheduler *sched = entity->sched;
	struct dma_fence *fence = entity->dependency;
	struct amd_sched_fence *s_fence;

	if (fence->context == entity->fence_context) {
		/* We can ignore fences from ourself */
		dma_fence_put(entity->dependency);
		return false;
	}

	s_fence = to_amd_sched_fence(fence);
	if (s_fence && s_fence->sched == sched) {

		/*
		 * Fence is from the same scheduler, only need to wait for
		 * it to be scheduled
		 */
		fence = dma_fence_get(&s_fence->scheduled);
		dma_fence_put(entity->dependency);
		entity->dependency = fence;
		if (!dma_fence_add_callback(fence, &entity->cb,
					    amd_sched_entity_clear_dep))
			return true;

		/* Ignore it when it is already scheduled */
		dma_fence_put(fence);
		return false;
	}

	if (!dma_fence_add_callback(entity->dependency, &entity->cb,
				    amd_sched_entity_wakeup))
		return true;

	dma_fence_put(entity->dependency);
	return false;
}

static struct amd_sched_job *
amd_sched_entity_pop_job(struct amd_sched_entity *entity)
{
	struct amd_gpu_scheduler *sched = entity->sched;
	struct amd_sched_job *sched_job;

	if (!kfifo_out_peek(&entity->job_queue, &sched_job, sizeof(sched_job)))
		return NULL;

	while ((entity->dependency = sched->ops->dependency(sched_job)))
		if (amd_sched_entity_add_dependency_cb(entity))
			return NULL;

	return sched_job;
}

/**
 * Helper to submit a job to the job queue
 *
 * @sched_job The pointer to the job to submit
 *
 * Returns true if we could submit the job.
 */
static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
{
	struct amd_gpu_scheduler *sched = sched_job->sched;
	struct amd_sched_entity *entity = sched_job->s_entity;
	bool added, first = false;

	spin_lock(&entity->queue_lock);
	added = kfifo_in(&entity->job_queue, &sched_job,
			 sizeof(sched_job)) == sizeof(sched_job);

	if (added && kfifo_len(&entity->job_queue) == sizeof(sched_job))
		first = true;

	spin_unlock(&entity->queue_lock);

	/* first job wakes up scheduler */
	if (first) {
		/* Add the entity to the run queue */
		amd_sched_rq_add_entity(entity->rq, entity);
		amd_sched_wakeup(sched);
	}
	return added;
}

/* amd_sched_job_finish is called after the hw fence is signaled; it removes
 * the job from the ring_mirror_list and frees it
 */
static void amd_sched_job_finish(struct work_struct *work)
{
	struct amd_sched_job *s_job = container_of(work, struct amd_sched_job,
						   finish_work);
	struct amd_gpu_scheduler *sched = s_job->sched;

	/* remove job from ring_mirror_list */
	spin_lock(&sched->job_list_lock);
	list_del_init(&s_job->node);
	if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
		struct amd_sched_job *next;

		spin_unlock(&sched->job_list_lock);
		cancel_delayed_work_sync(&s_job->work_tdr);
		spin_lock(&sched->job_list_lock);

		/* queue TDR for next job */
		next = list_first_entry_or_null(&sched->ring_mirror_list,
						struct amd_sched_job, node);

		if (next)
			schedule_delayed_work(&next->work_tdr, sched->timeout);
	}
	spin_unlock(&sched->job_list_lock);
	sched->ops->free_job(s_job);
}

static void amd_sched_job_finish_cb(struct dma_fence *f,
				    struct dma_fence_cb *cb)
{
	struct amd_sched_job *job = container_of(cb, struct amd_sched_job,
						 finish_cb);
	schedule_work(&job->finish_work);
}

static void amd_sched_job_begin(struct amd_sched_job *s_job)
{
	struct amd_gpu_scheduler *sched = s_job->sched;

	spin_lock(&sched->job_list_lock);
	list_add_tail(&s_job->node, &sched->ring_mirror_list);
	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
	    list_first_entry_or_null(&sched->ring_mirror_list,
				     struct amd_sched_job, node) == s_job)
		schedule_delayed_work(&s_job->work_tdr, sched->timeout);
	spin_unlock(&sched->job_list_lock);
}

static void amd_sched_job_timedout(struct work_struct *work)
{
	struct amd_sched_job *job = container_of(work, struct amd_sched_job,
						 work_tdr.work);

	job->sched->ops->timedout_job(job);
}

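/*
 * Detach the hw fence callbacks from all jobs on the mirror list, so that a
 * following GPU reset doesn't race against job completion interrupts.
 */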
void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched)
{
	struct amd_sched_job *s_job;

	spin_lock(&sched->job_list_lock);
	list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) {
		if (s_job->s_fence->parent &&
		    dma_fence_remove_callback(s_job->s_fence->parent,
					      &s_job->s_fence->cb)) {
			dma_fence_put(s_job->s_fence->parent);
			s_job->s_fence->parent = NULL;
		}
	}
	atomic_set(&sched->hw_rq_count, 0);
	spin_unlock(&sched->job_list_lock);
}

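/*
 * Resubmit all jobs still on the mirror list after a reset, re-arming the
 * timeout on the first one.
 */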
void amd_sched_job_recovery(struct amd_gpu_scheduler *sched)
{
	struct amd_sched_job *s_job, *tmp;
	int r;

	spin_lock(&sched->job_list_lock);
	s_job = list_first_entry_or_null(&sched->ring_mirror_list,
					 struct amd_sched_job, node);
	if (s_job && sched->timeout != MAX_SCHEDULE_TIMEOUT)
		schedule_delayed_work(&s_job->work_tdr, sched->timeout);

	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
		struct amd_sched_fence *s_fence = s_job->s_fence;
		struct dma_fence *fence;

		spin_unlock(&sched->job_list_lock);
		fence = sched->ops->run_job(s_job);
		atomic_inc(&sched->hw_rq_count);
		if (fence) {
			s_fence->parent = dma_fence_get(fence);
			r = dma_fence_add_callback(fence, &s_fence->cb,
						   amd_sched_process_job);
			if (r == -ENOENT)
				amd_sched_process_job(fence, &s_fence->cb);
			else if (r)
				DRM_ERROR("fence add callback failed (%d)\n",
					  r);
			dma_fence_put(fence);
		} else {
			DRM_ERROR("Failed to run job!\n");
			amd_sched_process_job(NULL, &s_fence->cb);
		}
		spin_lock(&sched->job_list_lock);
	}
	spin_unlock(&sched->job_list_lock);
}

/**
 * Submit a job to the job queue
 *
 * @sched_job The pointer to the job to submit
 *
 * Blocks until the job could be pushed to the entity's queue.
 */
void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
{
	struct amd_sched_entity *entity = sched_job->s_entity;

	trace_amd_sched_job(sched_job);
	dma_fence_add_callback(&sched_job->s_fence->finished, &sched_job->finish_cb,
			       amd_sched_job_finish_cb);
	wait_event(entity->sched->job_scheduled,
		   amd_sched_entity_in(sched_job));
}

/* init a sched_job with basic fields */
int amd_sched_job_init(struct amd_sched_job *job,
		       struct amd_gpu_scheduler *sched,
		       struct amd_sched_entity *entity,
		       void *owner)
{
	job->sched = sched;
	job->s_entity = entity;
	job->s_fence = amd_sched_fence_create(entity, owner);
	if (!job->s_fence)
		return -ENOMEM;
	job->id = atomic64_inc_return(&sched->job_id_count);

	INIT_WORK(&job->finish_work, amd_sched_job_finish);
	INIT_LIST_HEAD(&job->node);
	INIT_DELAYED_WORK(&job->work_tdr, amd_sched_job_timedout);

	return 0;
}
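
/*
 * Typical driver-side flow (sketch; "job", "ring", "entity" and "owner" are
 * illustrative driver names):
 *
 *	r = amd_sched_job_init(&job->base, &ring->sched, entity, owner);
 *	if (r)
 *		return r;
 *	... fill in the job's command buffers ...
 *	amd_sched_entity_push_job(&job->base);
 */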

/**
 * Return true if we can push more jobs to the hw.
 */
static bool amd_sched_ready(struct amd_gpu_scheduler *sched)
{
	return atomic_read(&sched->hw_rq_count) <
		sched->hw_submission_limit;
}

/**
 * Wake up the scheduler when it is ready
 */
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched)
{
	if (amd_sched_ready(sched))
		wake_up_interruptible(&sched->wake_up_worker);
}

/**
 * Select next entity to process
 */
static struct amd_sched_entity *
amd_sched_select_entity(struct amd_gpu_scheduler *sched)
{
	struct amd_sched_entity *entity;
	int i;

	if (!amd_sched_ready(sched))
		return NULL;

	/* Kernel run queue has higher priority than normal run queue */
	for (i = AMD_SCHED_PRIORITY_MAX - 1; i >= AMD_SCHED_PRIORITY_MIN; i--) {
		entity = amd_sched_rq_select_entity(&sched->sched_rq[i]);
		if (entity)
			break;
	}

	return entity;
}

static void amd_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct amd_sched_fence *s_fence =
		container_of(cb, struct amd_sched_fence, cb);
	struct amd_gpu_scheduler *sched = s_fence->sched;

	atomic_dec(&sched->hw_rq_count);
	amd_sched_fence_finished(s_fence);

	trace_amd_sched_process_job(s_fence);
	dma_fence_put(&s_fence->finished);
	wake_up_interruptible(&sched->wake_up_worker);
}

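/*
 * Park the scheduler thread when asked to, e.g. while the driver performs a
 * GPU reset, and report whether we were blocked.
 */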
static bool amd_sched_blocked(struct amd_gpu_scheduler *sched)
{
	if (kthread_should_park()) {
		kthread_parkme();
		return true;
	}

	return false;
}

static int amd_sched_main(void *param)
{
	struct sched_param sparam = {.sched_priority = 1};
	struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;
	int r, count;

	sched_setscheduler(current, SCHED_FIFO, &sparam);

	while (!kthread_should_stop()) {
		struct amd_sched_entity *entity = NULL;
		struct amd_sched_fence *s_fence;
		struct amd_sched_job *sched_job;
		struct dma_fence *fence;

		wait_event_interruptible(sched->wake_up_worker,
					 (!amd_sched_blocked(sched) &&
					  (entity = amd_sched_select_entity(sched))) ||
					 kthread_should_stop());

		if (!entity)
			continue;

		sched_job = amd_sched_entity_pop_job(entity);
		if (!sched_job)
			continue;

		s_fence = sched_job->s_fence;

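		/* account the job against the hw submission limit */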
		atomic_inc(&sched->hw_rq_count);
		amd_sched_job_begin(sched_job);

		fence = sched->ops->run_job(sched_job);
		amd_sched_fence_scheduled(s_fence);
		if (fence) {
			s_fence->parent = dma_fence_get(fence);
			r = dma_fence_add_callback(fence, &s_fence->cb,
						   amd_sched_process_job);
			if (r == -ENOENT)
				amd_sched_process_job(fence, &s_fence->cb);
			else if (r)
				DRM_ERROR("fence add callback failed (%d)\n",
					  r);
			dma_fence_put(fence);
		} else {
			DRM_ERROR("Failed to run job!\n");
			amd_sched_process_job(NULL, &s_fence->cb);
		}

		count = kfifo_out(&entity->job_queue, &sched_job,
				  sizeof(sched_job));
		WARN_ON(count != sizeof(sched_job));
		wake_up(&sched->job_scheduled);
	}
	return 0;
}

/**
 * Init a gpu scheduler instance
 *
 * @sched The pointer to the scheduler
 * @ops The backend operations for this scheduler.
 * @hw_submission Number of hw submissions to do.
 * @timeout Timeout for a job in jiffies, or MAX_SCHEDULE_TIMEOUT for none.
 * @name Name used for debugging
 *
 * Return 0 on success, otherwise error code.
 */
int amd_sched_init(struct amd_gpu_scheduler *sched,
		   const struct amd_sched_backend_ops *ops,
		   unsigned hw_submission, long timeout, const char *name)
{
	int i;

	sched->ops = ops;
	sched->hw_submission_limit = hw_submission;
	sched->name = name;
	sched->timeout = timeout;
	for (i = AMD_SCHED_PRIORITY_MIN; i < AMD_SCHED_PRIORITY_MAX; i++)
		amd_sched_rq_init(&sched->sched_rq[i]);

	init_waitqueue_head(&sched->wake_up_worker);
	init_waitqueue_head(&sched->job_scheduled);
	INIT_LIST_HEAD(&sched->ring_mirror_list);
	spin_lock_init(&sched->job_list_lock);
	atomic_set(&sched->hw_rq_count, 0);
	atomic64_set(&sched->job_id_count, 0);

	/* Each scheduler will run on a separate kernel thread */
	sched->thread = kthread_run(amd_sched_main, sched, sched->name);
	if (IS_ERR(sched->thread)) {
		DRM_ERROR("Failed to create scheduler for %s.\n", name);
		return PTR_ERR(sched->thread);
	}

	return 0;
}
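
/*
 * Setup sketch (illustrative values; "ring" and "my_sched_ops" are driver
 * side):
 *
 *	r = amd_sched_init(&ring->sched, &my_sched_ops, 16,
 *			   msecs_to_jiffies(10000), ring->name);
 */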

/**
 * Destroy a gpu scheduler
 *
 * @sched The pointer to the scheduler
 */
void amd_sched_fini(struct amd_gpu_scheduler *sched)
{
	if (sched->thread)
		kthread_stop(sched->thread);
}