/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _GPU_SCHEDULER_H_
#define _GPU_SCHEDULER_H_

#include <linux/kfifo.h>
#include <linux/fence.h>

#define AMD_SCHED_FENCE_SCHEDULED_BIT	FENCE_FLAG_USER_BITS

struct amd_gpu_scheduler;
struct amd_sched_rq;

extern struct kmem_cache *sched_fence_slab;
extern atomic_t sched_fence_slab_ref;
/**
 * A scheduler entity is a wrapper around a job queue or a group
 * of other entities. Entities take turns emitting jobs from their
 * job queues to the corresponding hardware ring, based on the
 * scheduling policy.
 */
struct amd_sched_entity {
	struct list_head		list;
	struct amd_sched_rq		*rq;
	struct amd_gpu_scheduler	*sched;

	spinlock_t			queue_lock;
	struct kfifo			job_queue;

	atomic_t			fence_seq;
	uint64_t			fence_context;

	struct fence			*dependency;
	struct fence_cb			cb;
};
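
/*
 * Illustrative sketch (not part of the API): a typical driver-side entity
 * lifecycle. The "ring" variable and the queue depth of 32 are assumptions
 * made for this example only.
 *
 *	struct amd_sched_entity entity;
 *	struct amd_sched_rq *rq =
 *		&ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
 *	int r;
 *
 *	r = amd_sched_entity_init(&ring->sched, &entity, rq, 32);
 *	if (r)
 *		return r;
 *
 *	// per submission: amd_sched_job_init() + amd_sched_entity_push_job()
 *
 *	amd_sched_entity_fini(&ring->sched, &entity);
 */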

/**
 * The run queue is a set of entities scheduling command submissions for
 * one specific ring. It implements the scheduling policy that selects
 * the next entity to emit commands from.
 */
struct amd_sched_rq {
	spinlock_t		lock;
	struct list_head	entities;
	struct amd_sched_entity	*current_entity;
};
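
/*
 * The policy implemented in gpu_scheduler.c (as of this version) is round
 * robin: amd_sched_rq_select_entity() resumes the walk of @entities after
 * @current_entity and picks the first entity with a job ready to run.
 * Simplified sketch of that loop:
 *
 *	list_for_each_entry_continue(entity, &rq->entities, list) {
 *		if (amd_sched_entity_is_ready(entity)) {
 *			rq->current_entity = entity;
 *			return entity;
 *		}
 *	}
 */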

struct amd_sched_fence {
	struct fence			base;
	struct fence_cb			cb;
	struct list_head		scheduled_cb;
	struct amd_gpu_scheduler	*sched;
	spinlock_t			lock;
	void				*owner;
	struct amd_sched_job		*s_job;
};

struct amd_sched_job {
	struct kref			refcount;
	struct amd_gpu_scheduler	*sched;
	struct amd_sched_entity		*s_entity;
	struct amd_sched_fence		*s_fence;
	bool				use_sched;	/* true if the job goes to the scheduler */
	struct fence_cb			cb_free_job;
	struct work_struct		work_free_job;
	struct list_head		node;
	struct delayed_work		work_tdr;
	void (*timeout_callback)(struct work_struct *work);
	void (*free_callback)(struct kref *refcount);
};

extern const struct fence_ops amd_sched_fence_ops;
static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f)
{
	struct amd_sched_fence *__f = container_of(f, struct amd_sched_fence,
						   base);

	if (__f->base.ops == &amd_sched_fence_ops)
		return __f;

	return NULL;
}
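
/*
 * Usage sketch: to_amd_sched_fence() is a checked downcast, so callers must
 * handle NULL for fences that did not originate from this scheduler ("f"
 * below is an arbitrary struct fence pointer):
 *
 *	struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
 *
 *	if (s_fence)
 *		owner = s_fence->owner;	// created by amd_sched_fence_create()
 */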

/**
 * Define the backend operations called by the scheduler;
 * these functions should be implemented on the driver side.
 */
struct amd_sched_backend_ops {
	struct fence *(*dependency)(struct amd_sched_job *sched_job);
	struct fence *(*run_job)(struct amd_sched_job *sched_job);
	void (*begin_job)(struct amd_sched_job *sched_job);
	void (*finish_job)(struct amd_sched_job *sched_job);
};
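
/*
 * Sketch of a driver-side ops table (the my_* names are hypothetical; see
 * amdgpu_sched_ops in amdgpu for a real implementation, which wires
 * begin_job/finish_job to the helpers declared further below):
 *
 *	static struct fence *my_dependency(struct amd_sched_job *sched_job)
 *	{
 *		// return the next unsignaled fence the job still depends on,
 *		// or NULL once the job may run
 *		return NULL;
 *	}
 *
 *	static struct fence *my_run_job(struct amd_sched_job *sched_job)
 *	{
 *		// push the job to the hardware ring, return its HW fence
 *		return my_hw_submit(sched_job);
 *	}
 *
 *	static const struct amd_sched_backend_ops my_ops = {
 *		.dependency = my_dependency,
 *		.run_job = my_run_job,
 *		.begin_job = amd_sched_job_begin,
 *		.finish_job = amd_sched_job_finish,
 *	};
 */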

enum amd_sched_priority {
	AMD_SCHED_PRIORITY_KERNEL = 0,
	AMD_SCHED_PRIORITY_NORMAL,
	AMD_SCHED_MAX_PRIORITY
};
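
/*
 * Example: picking a run queue by priority. Kernel-internal submissions use
 * the kernel run queue, everything else the normal one ("from_kernel" is a
 * stand-in condition for this sketch):
 *
 *	struct amd_sched_rq *rq = from_kernel ?
 *		&sched->sched_rq[AMD_SCHED_PRIORITY_KERNEL] :
 *		&sched->sched_rq[AMD_SCHED_PRIORITY_NORMAL];
 */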

/**
 * One scheduler is implemented for each hardware ring.
 */
struct amd_gpu_scheduler {
	const struct amd_sched_backend_ops	*ops;
	uint32_t			hw_submission_limit;
	long				timeout;
	const char			*name;
	struct amd_sched_rq		sched_rq[AMD_SCHED_MAX_PRIORITY];
	wait_queue_head_t		wake_up_worker;
	wait_queue_head_t		job_scheduled;
	atomic_t			hw_rq_count;
	struct task_struct		*thread;
	struct list_head		ring_mirror_list;
	spinlock_t			job_list_lock;
};
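
/*
 * Bring-up sketch, one scheduler per ring. The submission depth of 2 and
 * the 10 second timeout are example values, not requirements:
 *
 *	r = amd_sched_init(&ring->sched, &my_ops, 2,
 *			   msecs_to_jiffies(10000), ring->name);
 *	if (r)
 *		return r;
 *	...
 *	amd_sched_fini(&ring->sched);
 */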
139
Christian König4f839a22015-09-08 20:22:31 +0200140int amd_sched_init(struct amd_gpu_scheduler *sched,
Nils Wallménius62250a92016-04-10 16:30:00 +0200141 const struct amd_sched_backend_ops *ops,
Junwei Zhang2440ff22015-10-10 08:48:42 +0800142 uint32_t hw_submission, long timeout, const char *name);
Christian König4f839a22015-09-08 20:22:31 +0200143void amd_sched_fini(struct amd_gpu_scheduler *sched);
Jammy Zhoua72ce6f2015-05-22 18:55:07 +0800144
Christian König91404fb2015-08-05 18:33:21 +0200145int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
146 struct amd_sched_entity *entity,
Christian König432a4ff2015-08-12 11:46:04 +0200147 struct amd_sched_rq *rq,
Christian König91404fb2015-08-05 18:33:21 +0200148 uint32_t jobs);
Christian König062c7fb2015-08-21 15:46:43 +0200149void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
150 struct amd_sched_entity *entity);
Christian Könige2840222015-11-05 19:49:48 +0100151void amd_sched_entity_push_job(struct amd_sched_job *sched_job);
Jammy Zhoua72ce6f2015-05-22 18:55:07 +0800152
Chunming Zhouf556cb0c2015-08-02 11:18:04 +0800153struct amd_sched_fence *amd_sched_fence_create(
Chunming Zhou84f76ea2015-08-24 12:47:36 +0800154 struct amd_sched_entity *s_entity, void *owner);
Christian König393a0bd2015-11-05 12:57:10 +0100155void amd_sched_fence_scheduled(struct amd_sched_fence *fence);
Chunming Zhouf556cb0c2015-08-02 11:18:04 +0800156void amd_sched_fence_signal(struct amd_sched_fence *fence);
Monk Liue6869412016-03-07 12:49:55 +0800157int amd_sched_job_init(struct amd_sched_job *job,
Christian König16a71332016-05-18 09:43:07 +0200158 struct amd_gpu_scheduler *sched,
159 struct amd_sched_entity *entity,
160 void (*timeout_cb)(struct work_struct *work),
161 void (*free_cb)(struct kref* refcount),
162 void *owner, struct fence **fence);
Monk Liu48350962016-03-04 14:33:44 +0800163void amd_sched_job_pre_schedule(struct amd_gpu_scheduler *sched ,
Christian König16a71332016-05-18 09:43:07 +0200164 struct amd_sched_job *s_job);
Monk Liu0de24792016-03-04 18:51:02 +0800165void amd_sched_job_finish(struct amd_sched_job *s_job);
166void amd_sched_job_begin(struct amd_sched_job *s_job);
Christian König16a71332016-05-18 09:43:07 +0200167static inline void amd_sched_job_get(struct amd_sched_job *job)
168{
Monk Liub6723c82016-03-10 12:14:44 +0800169 if (job)
170 kref_get(&job->refcount);
171}
172
Christian König16a71332016-05-18 09:43:07 +0200173static inline void amd_sched_job_put(struct amd_sched_job *job)
174{
Monk Liub6723c82016-03-10 12:14:44 +0800175 if (job)
176 kref_put(&job->refcount, job->free_callback);
177}
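
/*
 * Refcounting sketch: amd_sched_job_init() is assumed to take the initial
 * reference; any context that must keep the job alive across a callback
 * takes and drops its own (free_callback, supplied at init time, runs on
 * the final put):
 *
 *	amd_sched_job_get(job);
 *	// ... job may complete or time out here ...
 *	amd_sched_job_put(job);
 */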

#endif