/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <drm/drmP.h>
#include "gpu_scheduler.h"

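/*
 * Overview: each amd_gpu_scheduler instance drives one hardware ring from
 * its own kernel thread. Jobs are queued per amd_sched_entity in a kfifo;
 * entities live in round-robin run queues, with the kernel run queue
 * taking priority over the normal one. Completed jobs are retired from a
 * fence callback, which also wakes the scheduler thread.
 */
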
/* Initialize a given run queue struct */
static void init_rq(struct amd_run_queue *rq)
{
	INIT_LIST_HEAD(&rq->head.list);
	rq->head.belongto_rq = rq;
	mutex_init(&rq->lock);
	atomic_set(&rq->nr_entity, 0);
	rq->current_entity = &rq->head;
}

/* Note: the caller must hold the run queue lock or be in an atomic context */
static void rq_remove_entity(struct amd_run_queue *rq,
			     struct amd_sched_entity *entity)
{
	if (rq->current_entity == entity)
		rq->current_entity = list_entry(entity->list.prev,
						typeof(*entity), list);
	list_del_init(&entity->list);
	atomic_dec(&rq->nr_entity);
}

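/* Note: the caller must hold the run queue lock, as in rq_remove_entity */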
static void rq_add_entity(struct amd_run_queue *rq,
			  struct amd_sched_entity *entity)
{
	list_add_tail(&entity->list, &rq->head.list);
	atomic_inc(&rq->nr_entity);
}

/**
 * Select the next entity from a specified run queue with round-robin policy.
 * It can return the same entity as the current one if that is the only
 * available one in the queue. Returns NULL if nothing is available.
 */
static struct amd_sched_entity *rq_select_entity(struct amd_run_queue *rq)
{
	struct amd_sched_entity *p = rq->current_entity;
	int i = atomic_read(&rq->nr_entity) + 1; /* real count + dummy head */

	while (i) {
		p = list_entry(p->list.next, typeof(*p), list);
		if (!rq->check_entity_status(p)) {
			rq->current_entity = p;
			break;
		}
		i--;
	}
	return i ? p : NULL;
}

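/* Returns true if the entity still has to wait on other rings */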
static bool context_entity_is_waiting(struct amd_sched_entity *entity)
{
	/* TODO: sync obj for multi-ring synchronization */
	return false;
}

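/*
 * Status check used by the run queues: an entity is not schedulable if it
 * is the dummy list head, has no queued jobs, or is still waiting on other
 * rings. Returns 0 if the entity is ready, -1 otherwise.
 */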
static int gpu_entity_check_status(struct amd_sched_entity *entity)
{
	if (entity == &entity->belongto_rq->head)
		return -1;

	if (kfifo_is_empty(&entity->job_queue) ||
	    context_entity_is_waiting(entity))
		return -1;

	return 0;
}

/**
 * Note: This function should only be called inside the scheduler main
 * loop for thread safety; there is no other protection here.
 * Returns true if the scheduler has something ready to run.
 *
 * For active_hw_rq, there is only one producer (the scheduler thread) and
 * one consumer (the ISR), so it is safe to use this function in the
 * scheduler main thread to decide whether to continue emitting more IBs.
 */
static bool is_scheduler_ready(struct amd_gpu_scheduler *sched)
{
	unsigned long flags;
	bool ready;

	spin_lock_irqsave(&sched->queue_lock, flags);
	ready = atomic64_read(&sched->hw_rq_count) <
		sched->hw_submission_limit;
	spin_unlock_irqrestore(&sched->queue_lock, flags);

	return ready;
}

/**
 * Select the next entity from the kernel run queue; returns NULL if
 * none is available.
 */
static struct amd_sched_entity *
kernel_rq_select_context(struct amd_gpu_scheduler *sched)
{
	struct amd_sched_entity *sched_entity;
	struct amd_run_queue *rq = &sched->kernel_rq;

	mutex_lock(&rq->lock);
	sched_entity = rq_select_entity(rq);
	mutex_unlock(&rq->lock);
	return sched_entity;
}

/**
 * Select the next entity containing real IB submissions
 */
static struct amd_sched_entity *
select_context(struct amd_gpu_scheduler *sched)
{
	struct amd_sched_entity *wake_entity = NULL;
	struct amd_sched_entity *tmp;
	struct amd_run_queue *rq;

	if (!is_scheduler_ready(sched))
		return NULL;

	/* The kernel run queue has higher priority than the normal run queue */
	tmp = kernel_rq_select_context(sched);
	if (tmp != NULL)
		goto exit;

	rq = &sched->sched_rq;
	mutex_lock(&rq->lock);
	tmp = rq_select_entity(rq);
	mutex_unlock(&rq->lock);
exit:
	/* Wake up the entity we are switching away from, so that a blocked
	 * amd_sched_entity_fini() can re-check whether it has become idle. */
	if (sched->current_entity && (sched->current_entity != tmp))
		wake_entity = sched->current_entity;
	sched->current_entity = tmp;
	if (wake_entity && wake_entity->need_wakeup)
		wake_up(&wake_entity->wait_queue);
	return tmp;
}

/**
 * Init a context entity, used by the scheduler when submitting to a HW ring.
 *
 * @sched The pointer to the scheduler
 * @entity The pointer to a valid amd_sched_entity
 * @rq The run queue this entity belongs to
 * @jobs The max number of jobs in the job queue
 *
 * return 0 if successful, negative error code on failure
 */
int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
			  struct amd_sched_entity *entity,
			  struct amd_run_queue *rq,
			  uint32_t jobs)
{
	uint64_t seq_ring = 0;
	char name[20];

	if (!(sched && entity && rq))
		return -EINVAL;

	memset(entity, 0, sizeof(struct amd_sched_entity));
	seq_ring = ((uint64_t)sched->ring_id) << 60;
	spin_lock_init(&entity->lock);
	entity->belongto_rq = rq;
	entity->scheduler = sched;
	init_waitqueue_head(&entity->wait_queue);
	init_waitqueue_head(&entity->wait_emit);
	entity->fence_context = fence_context_alloc(1);
	snprintf(name, sizeof(name), "c_entity[%llu]", entity->fence_context);
	memcpy(entity->name, name, sizeof(name));
	entity->need_wakeup = false;
	if (kfifo_alloc(&entity->job_queue,
			jobs * sizeof(void *),
			GFP_KERNEL))
		return -EINVAL;

	spin_lock_init(&entity->queue_lock);
	atomic64_set(&entity->last_queued_v_seq, seq_ring);
	atomic64_set(&entity->last_signaled_v_seq, seq_ring);

	/* Add the entity to the run queue */
	mutex_lock(&rq->lock);
	rq_add_entity(rq, entity);
	mutex_unlock(&rq->lock);
	return 0;
}

/**
 * Query if entity is initialized
 *
 * @sched Pointer to scheduler instance
 * @entity The pointer to a valid scheduler entity
 *
 * return true if entity is initialized, false otherwise
 */
static bool is_context_entity_initialized(struct amd_gpu_scheduler *sched,
					  struct amd_sched_entity *entity)
{
	return entity->scheduler == sched &&
		entity->belongto_rq != NULL;
}

static bool is_context_entity_idle(struct amd_gpu_scheduler *sched,
				   struct amd_sched_entity *entity)
{
	/*
	 * Idle means no pending IBs, and the entity is not
	 * currently being used.
	 */
	barrier(); /* force a fresh read of sched->current_entity */
	return sched->current_entity != entity &&
		kfifo_is_empty(&entity->job_queue);
}

/**
 * Destroy a context entity
 *
 * @sched Pointer to scheduler instance
 * @entity The pointer to a valid scheduler entity
 *
 * return 0 if successful, negative error code on failure
 */
int amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
			  struct amd_sched_entity *entity)
{
	int r = 0;
	struct amd_run_queue *rq = entity->belongto_rq;

	if (!is_context_entity_initialized(sched, entity))
		return 0;
	entity->need_wakeup = true;
	/*
	 * The client will not queue more IBs during this fini; consume the
	 * existing queued IBs.
	 */
	r = wait_event_timeout(
		entity->wait_queue,
		is_context_entity_idle(sched, entity),
		msecs_to_jiffies(AMD_GPU_WAIT_IDLE_TIMEOUT_IN_MS)
		) ? 0 : -1;

	if (r) {
		if (entity->is_pending)
			DRM_INFO("Entity %p is in waiting state during fini, "
				 "all pending ibs will be canceled.\n",
				 entity);
	}

	mutex_lock(&rq->lock);
	rq_remove_entity(rq, entity);
	mutex_unlock(&rq->lock);
	kfifo_free(&entity->job_queue);
	return r;
}

/**
 * Submit a normal job to the job queue
 *
 * @sched The pointer to the scheduler
 * @c_entity The pointer to amd_sched_entity
 * @data The pointer to the job data to submit
 * @fence Set to the fence created for this job
 *
 * return 0 if successful, negative error code on failure. If the job
 * queue of this client is full, the call blocks until the scheduler has
 * consumed some of the queued commands.
 */
int amd_sched_push_job(struct amd_gpu_scheduler *sched,
		       struct amd_sched_entity *c_entity,
		       void *data,
		       struct amd_sched_fence **fence)
{
	struct amd_sched_job *job;

	if (!fence)
		return -EINVAL;
	job = kzalloc(sizeof(struct amd_sched_job), GFP_KERNEL);
	if (!job)
		return -ENOMEM;
	job->sched = sched;
	job->s_entity = c_entity;
	job->data = data;
	*fence = amd_sched_fence_create(c_entity);
	if ((*fence) == NULL) {
		kfree(job);
		return -EINVAL;
	}
	/* Take an extra fence reference for the job itself; it is dropped
	 * in amd_sched_process_job once the job completes, while the
	 * reference from amd_sched_fence_create belongs to the caller. */
	fence_get(&(*fence)->base);
	job->s_fence = *fence;
	while (kfifo_in_spinlocked(&c_entity->job_queue, &job, sizeof(void *),
				   &c_entity->queue_lock) != sizeof(void *)) {
		/*
		 * The current context used up all its IB slots;
		 * wait here, or we need to check whether the GPU is hung.
		 */
		schedule();
	}
	/* The first queued job wakes up the scheduler */
	if ((kfifo_len(&c_entity->job_queue) / sizeof(void *)) == 1)
		wake_up_interruptible(&sched->wait_queue);
	return 0;
}

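/**
 * Fence callback invoked when a job has finished on the hardware:
 * records the signaled sequence number, signals the scheduler fence,
 * removes the job from the active list and wakes the scheduler thread.
 */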
static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
{
	struct amd_sched_job *sched_job =
		container_of(cb, struct amd_sched_job, cb);
	struct amd_gpu_scheduler *sched;
	unsigned long flags;

	sched = sched_job->sched;
	atomic64_set(&sched_job->s_entity->last_signaled_v_seq,
		     sched_job->s_fence->v_seq);
	amd_sched_fence_signal(sched_job->s_fence);
	spin_lock_irqsave(&sched->queue_lock, flags);
	list_del(&sched_job->list);
	atomic64_dec(&sched->hw_rq_count);
	spin_unlock_irqrestore(&sched->queue_lock, flags);

	sched->ops->process_job(sched, sched_job);
	fence_put(&sched_job->s_fence->base);
	kfree(sched_job);
	wake_up_interruptible(&sched->wait_queue);
}

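/**
 * The scheduler main loop: waits until the hardware queue has room and an
 * entity is ready, pops one job from that entity's queue, hands it to the
 * backend and installs amd_sched_process_job as the completion callback.
 */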
static int amd_sched_main(void *param)
{
	int r;
	struct amd_sched_job *job;
	struct sched_param sparam = {.sched_priority = 1};
	struct amd_sched_entity *c_entity = NULL;
	struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;

	sched_setscheduler(current, SCHED_FIFO, &sparam);

	while (!kthread_should_stop()) {
		struct fence *fence;

		wait_event_interruptible(sched->wait_queue,
					 is_scheduler_ready(sched) &&
					 (c_entity = select_context(sched)));
		r = kfifo_out(&c_entity->job_queue, &job, sizeof(void *));
		if (r != sizeof(void *))
			continue;
		r = sched->ops->prepare_job(sched, c_entity, job);
		if (!r) {
			unsigned long flags;

			spin_lock_irqsave(&sched->queue_lock, flags);
			list_add_tail(&job->list, &sched->active_hw_rq);
			atomic64_inc(&sched->hw_rq_count);
			spin_unlock_irqrestore(&sched->queue_lock, flags);
		}
		mutex_lock(&sched->sched_lock);
		fence = sched->ops->run_job(sched, c_entity, job);
		if (fence) {
			r = fence_add_callback(fence, &job->cb,
					       amd_sched_process_job);
			if (r == -ENOENT)
				amd_sched_process_job(fence, &job->cb);
			else if (r)
				DRM_ERROR("fence add callback failed (%d)\n", r);
			fence_put(fence);
		}
		mutex_unlock(&sched->sched_lock);
	}
	return 0;
}

/**
 * Create a gpu scheduler
 *
 * @device The device context for this scheduler
 * @ops The backend operations for this scheduler.
 * @ring The scheduler is per ring; this is the ring id.
 * @granularity The minimum ms unit the scheduler will schedule at.
 * @preemption Indicate whether this ring supports preemption, 0 is no.
 * @hw_submission The max number of jobs in flight on the hardware ring
 *
 * return the pointer to scheduler for success, otherwise return NULL
 */
struct amd_gpu_scheduler *amd_sched_create(void *device,
					   struct amd_sched_backend_ops *ops,
					   unsigned ring,
					   unsigned granularity,
					   unsigned preemption,
					   unsigned hw_submission)
{
	struct amd_gpu_scheduler *sched;
	char name[20];

	sched = kzalloc(sizeof(struct amd_gpu_scheduler), GFP_KERNEL);
	if (!sched)
		return NULL;

	sched->device = device;
	sched->ops = ops;
	sched->granularity = granularity;
	sched->ring_id = ring;
	sched->preemption = preemption;
	sched->hw_submission_limit = hw_submission;
	snprintf(name, sizeof(name), "gpu_sched[%d]", ring);
	mutex_init(&sched->sched_lock);
	spin_lock_init(&sched->queue_lock);
	init_rq(&sched->sched_rq);
	sched->sched_rq.check_entity_status = gpu_entity_check_status;

	init_rq(&sched->kernel_rq);
	sched->kernel_rq.check_entity_status = gpu_entity_check_status;

	init_waitqueue_head(&sched->wait_queue);
	INIT_LIST_HEAD(&sched->active_hw_rq);
	atomic64_set(&sched->hw_rq_count, 0);
	/* Each scheduler will run on a separate kernel thread */
	sched->thread = kthread_create(amd_sched_main, sched, name);
	/* kthread_create() returns an ERR_PTR on failure, never NULL */
	if (!IS_ERR(sched->thread)) {
		wake_up_process(sched->thread);
		return sched;
	}

	DRM_ERROR("Failed to create scheduler for id %d.\n", ring);
	kfree(sched);
	return NULL;
}

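/*
 * Typical driver-side usage (a sketch only; the backend ops, ring id and
 * queue depths below are made-up example values):
 *
 *	sched = amd_sched_create(adev, &my_backend_ops, ring_id, 1, 0, 16);
 *	amd_sched_entity_init(sched, &entity, &sched->sched_rq, 32);
 *	amd_sched_push_job(sched, &entity, job_data, &fence);
 *	...
 *	amd_sched_entity_fini(sched, &entity);
 *	amd_sched_destroy(sched);
 */
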
/**
 * Destroy a gpu scheduler
 *
 * @sched The pointer to the scheduler
 *
 * return 0 if successful
 */
int amd_sched_destroy(struct amd_gpu_scheduler *sched)
{
	kthread_stop(sched->thread);
	kfree(sched);
	return 0;
}

/**
 * Get the next queued sequence number
 *
 * @c_entity The context entity
 *
 * return the next queued sequence number
 */
uint64_t amd_sched_next_queued_seq(struct amd_sched_entity *c_entity)
{
	return atomic64_read(&c_entity->last_queued_v_seq) + 1;
}