/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _GPU_SCHEDULER_H_
#define _GPU_SCHEDULER_H_

#include <linux/kfifo.h>
#include <linux/dma-fence.h>

struct amd_gpu_scheduler;
struct amd_sched_rq;

/**
 * A scheduler entity is a wrapper around a job queue or a group
 * of other entities. Entities take turns emitting jobs from their
 * job queues to the corresponding hardware ring, based on the
 * scheduling policy.
 */
struct amd_sched_entity {
	struct list_head		list;		/* link in rq->entities */
	struct amd_sched_rq		*rq;		/* run queue we are on */
	struct amd_gpu_scheduler	*sched;		/* scheduler we belong to */

	spinlock_t			queue_lock;	/* protects job_queue */
	struct kfifo			job_queue;	/* FIFO of pushed jobs */

	atomic_t			fence_seq;	/* seqno for the next fence */
	uint64_t			fence_context;	/* fence contexts of this entity's fences */

	struct dma_fence		*dependency;	/* fence we currently wait on */
	struct dma_fence_cb		cb;		/* wakes the scheduler when it signals */
};

/**
 * A run queue is a set of entities scheduling command submissions for
 * one specific ring. It implements the scheduling policy that selects
 * the next entity to emit commands from.
 */
struct amd_sched_rq {
	spinlock_t		lock;		/* protects entities and current_entity */
	struct list_head	entities;	/* entities served by this rq */
	struct amd_sched_entity	*current_entity;	/* round-robin cursor */
};
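
/*
 * Illustrative sketch of the round-robin policy described above. The real
 * selection logic lives in gpu_scheduler.c and also considers the entity's
 * dependency fence; the kfifo_is_empty() readiness test and the function
 * example_rq_round_robin() are simplifications, not part of this API.
 */
static inline struct amd_sched_entity *
example_rq_round_robin(struct amd_sched_rq *rq)
{
	struct amd_sched_entity *entity;

	spin_lock(&rq->lock);

	/* Resume the walk just behind the entity that ran last... */
	entity = rq->current_entity;
	if (entity) {
		list_for_each_entry_continue(entity, &rq->entities, list) {
			if (!kfifo_is_empty(&entity->job_queue)) {
				rq->current_entity = entity;
				spin_unlock(&rq->lock);
				return entity;
			}
		}
	}

	/* ...then wrap around so every entity gets its turn. */
	list_for_each_entry(entity, &rq->entities, list) {
		if (!kfifo_is_empty(&entity->job_queue)) {
			rq->current_entity = entity;
			spin_unlock(&rq->lock);
			return entity;
		}
		if (entity == rq->current_entity)
			break;
	}

	spin_unlock(&rq->lock);
	return NULL;
}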

/**
 * Fence pair attached to each job: "scheduled" signals when the scheduler
 * hands the job to the hardware ring, "finished" when the hardware has
 * completed it. "parent" is the hardware fence returned by run_job().
 */
struct amd_sched_fence {
	struct dma_fence		scheduled;
	struct dma_fence		finished;
	struct dma_fence_cb		cb;	/* callback on the parent fence */
	struct dma_fence		*parent;
	struct amd_gpu_scheduler	*sched;
	spinlock_t			lock;	/* protects both embedded fences */
	void				*owner;	/* opaque tag of the submitter */
};

struct amd_sched_job {
	struct amd_gpu_scheduler	*sched;
	struct amd_sched_entity		*s_entity;
	struct amd_sched_fence		*s_fence;
	struct dma_fence_cb		finish_cb;	/* fires when s_fence->finished signals */
	struct work_struct		finish_work;	/* deferred cleanup work */
	struct list_head		node;		/* link in sched->ring_mirror_list */
	struct delayed_work		work_tdr;	/* timeout detection and recovery */
	uint64_t			id;		/* unique id from job_id_count */
	atomic_t			karma;		/* strikes against a hanging job */
};
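
/*
 * A minimal sketch (not part of this API): waiting on a job's "finished"
 * fence blocks until the hardware has completed the job. The wait below
 * is interruptible.
 */
static inline long example_wait_job(struct amd_sched_job *job)
{
	/* s_fence is valid once amd_sched_job_init() has been called. */
	return dma_fence_wait(&job->s_fence->finished, true);
}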

extern const struct dma_fence_ops amd_sched_fence_ops_scheduled;
extern const struct dma_fence_ops amd_sched_fence_ops_finished;
static inline struct amd_sched_fence *to_amd_sched_fence(struct dma_fence *f)
{
	if (f->ops == &amd_sched_fence_ops_scheduled)
		return container_of(f, struct amd_sched_fence, scheduled);

	if (f->ops == &amd_sched_fence_ops_finished)
		return container_of(f, struct amd_sched_fence, finished);

	return NULL;
}
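
/*
 * Example use of to_amd_sched_fence() (a sketch, not part of this API):
 * since it returns NULL for foreign fences, it can classify an arbitrary
 * dma_fence, e.g. to tell whether it came from a given scheduler.
 */
static inline bool example_is_own_fence(struct amd_gpu_scheduler *sched,
					struct dma_fence *f)
{
	struct amd_sched_fence *s_fence = to_amd_sched_fence(f);

	return s_fence && s_fence->sched == sched;
}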

/* Charge one strike against a job suspected of hanging the hardware;
 * returns true once its karma exceeds the given threshold. */
static inline bool amd_sched_invalidate_job(struct amd_sched_job *s_job,
					    int threshold)
{
	return (s_job && atomic_inc_return(&s_job->karma) > threshold);
}

/**
 * Define the backend operations called by the scheduler;
 * these functions must be implemented by the driver.
 */
struct amd_sched_backend_ops {
	/* Return the next fence the job still depends on, or NULL when it
	 * is ready to run; called repeatedly until it returns NULL. */
	struct dma_fence *(*dependency)(struct amd_sched_job *sched_job);
	/* Hand the job to the hardware ring and return the hardware fence
	 * that signals its completion. */
	struct dma_fence *(*run_job)(struct amd_sched_job *sched_job);
	/* Called when the job ran longer than the scheduler's timeout. */
	void (*timedout_job)(struct amd_sched_job *sched_job);
	/* Release the job's resources once it is no longer needed. */
	void (*free_job)(struct amd_sched_job *sched_job);
};
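
/*
 * A minimal driver-side sketch of these hooks (illustrative only: the
 * example_driver_job wrapper, the stubbed bodies and the ops table below
 * are assumptions for demonstration, not part of this header's API).
 */
struct example_driver_job {
	struct amd_sched_job	base;		/* embed the scheduler job */
	struct dma_fence	*hw_fence;	/* completion fence of the ring */
};

static struct dma_fence *example_ops_dependency(struct amd_sched_job *sched_job)
{
	return NULL;	/* no dependencies left: the job may run */
}

static struct dma_fence *example_ops_run_job(struct amd_sched_job *sched_job)
{
	struct example_driver_job *job =
		container_of(sched_job, struct example_driver_job, base);

	/* A real driver emits the command stream to its ring here and
	 * returns a reference to the hardware fence tracking it. */
	return dma_fence_get(job->hw_fence);
}

static void example_ops_timedout_job(struct amd_sched_job *sched_job)
{
	/* A real driver would start its GPU reset/recovery path here. */
}

static void example_ops_free_job(struct amd_sched_job *sched_job)
{
	/* Release the memory backing the job wrapper here. */
}

/* Hypothetical ops table a driver would pass to amd_sched_init(): */
static const struct amd_sched_backend_ops example_sched_ops = {
	.dependency	= example_ops_dependency,
	.run_job	= example_ops_run_job,
	.timedout_job	= example_ops_timedout_job,
	.free_job	= example_ops_free_job,
};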

enum amd_sched_priority {
	AMD_SCHED_PRIORITY_MIN,
	AMD_SCHED_PRIORITY_LOW = AMD_SCHED_PRIORITY_MIN,
	AMD_SCHED_PRIORITY_NORMAL,
	AMD_SCHED_PRIORITY_HIGH_SW,
	AMD_SCHED_PRIORITY_HIGH_HW,
	AMD_SCHED_PRIORITY_KERNEL,
	AMD_SCHED_PRIORITY_MAX
};

/**
 * One scheduler is implemented for each hardware ring.
 */
struct amd_gpu_scheduler {
	const struct amd_sched_backend_ops	*ops;
	uint32_t		hw_submission_limit;	/* max jobs in flight on HW */
	long			timeout;		/* job timeout in jiffies */
	const char		*name;
	struct amd_sched_rq	sched_rq[AMD_SCHED_PRIORITY_MAX];
	wait_queue_head_t	wake_up_worker;		/* wakes the scheduler thread */
	wait_queue_head_t	job_scheduled;		/* signaled per scheduled job */
	atomic_t		hw_rq_count;		/* jobs currently on the ring */
	atomic64_t		job_id_count;		/* source of amd_sched_job::id */
	struct task_struct	*thread;		/* the scheduler kthread */
	struct list_head	ring_mirror_list;	/* in-flight jobs, for recovery */
	spinlock_t		job_list_lock;		/* protects ring_mirror_list */
};
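
/*
 * Sketch (not part of this API): an entity's priority simply selects one
 * of the scheduler's per-priority run queues.
 */
static inline struct amd_sched_rq *
example_priority_to_rq(struct amd_gpu_scheduler *sched,
		       enum amd_sched_priority prio)
{
	/* Fall back to normal priority for out-of-range values. */
	if (prio >= AMD_SCHED_PRIORITY_MAX)
		prio = AMD_SCHED_PRIORITY_NORMAL;
	return &sched->sched_rq[prio];
}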

int amd_sched_init(struct amd_gpu_scheduler *sched,
		   const struct amd_sched_backend_ops *ops,
		   uint32_t hw_submission, long timeout, const char *name);
void amd_sched_fini(struct amd_gpu_scheduler *sched);
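
/*
 * Sketch of bringing up one scheduler instance for a ring, using the
 * hypothetical example_sched_ops table above (the queue depth of 16 and
 * the name are illustrative assumptions; the timeout is in jiffies, as
 * amd_sched_init() expects):
 */
static inline int example_scheduler_setup(struct amd_gpu_scheduler *sched,
					  long timeout_jiffies)
{
	/* Allow up to 16 jobs on the hardware ring at once. */
	return amd_sched_init(sched, &example_sched_ops, 16,
			      timeout_jiffies, "example-ring");
}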

int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
			  struct amd_sched_entity *entity,
			  struct amd_sched_rq *rq,
			  uint32_t jobs);
void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
			   struct amd_sched_entity *entity);
void amd_sched_entity_push_job(struct amd_sched_job *sched_job);
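
/*
 * Sketch (not part of this API): binding a context's entity to a ring at
 * a given priority. "jobs" bounds how many submissions the entity's kfifo
 * can buffer before pushes have to wait.
 */
static inline int example_entity_setup(struct amd_gpu_scheduler *sched,
				       struct amd_sched_entity *entity,
				       enum amd_sched_priority prio,
				       uint32_t jobs)
{
	struct amd_sched_rq *rq = example_priority_to_rq(sched, prio);

	return amd_sched_entity_init(sched, entity, rq, jobs);
}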

int amd_sched_fence_slab_init(void);
void amd_sched_fence_slab_fini(void);

struct amd_sched_fence *amd_sched_fence_create(
	struct amd_sched_entity *s_entity, void *owner);
void amd_sched_fence_scheduled(struct amd_sched_fence *fence);
void amd_sched_fence_finished(struct amd_sched_fence *fence);
int amd_sched_job_init(struct amd_sched_job *job,
		       struct amd_gpu_scheduler *sched,
		       struct amd_sched_entity *entity,
		       void *owner);
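
/*
 * Sketch of the resulting submission flow (error handling trimmed; the
 * function itself is illustrative, not part of this API):
 */
static inline int example_submit(struct amd_gpu_scheduler *sched,
				 struct amd_sched_entity *entity,
				 struct amd_sched_job *job, void *owner)
{
	int r;

	/* Creates the scheduled/finished fence pair and arms the job. */
	r = amd_sched_job_init(job, sched, entity, owner);
	if (r)
		return r;

	/* Queue on the entity; the scheduler thread dequeues it in
	 * run-queue priority order and calls ops->run_job(). */
	amd_sched_entity_push_job(job);
	return 0;
}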

void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched);
void amd_sched_job_recovery(struct amd_gpu_scheduler *sched);
bool amd_sched_dependency_optimized(struct dma_fence *fence,
				    struct amd_sched_entity *entity);
void amd_sched_job_kickout(struct amd_sched_job *s_job);
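
/*
 * Sketch of how a recovery path can combine amd_sched_invalidate_job()
 * and amd_sched_job_kickout() (the threshold value is an illustrative
 * assumption): every hang charges the suspected job, and past the limit
 * it is kicked out instead of being resubmitted by amd_sched_job_recovery().
 */
#define EXAMPLE_JOB_HANG_LIMIT	2	/* hypothetical strike limit */

static inline void example_punish_job(struct amd_sched_job *s_job)
{
	if (amd_sched_invalidate_job(s_job, EXAMPLE_JOB_HANG_LIMIT))
		amd_sched_job_kickout(s_job);
}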

#endif