/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <drm/drmP.h>
#include "gpu_scheduler.h"

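/**
 * amd_sched_fence_create - create a scheduler fence
 *
 * @entity: entity the fence belongs to
 * @owner: opaque pointer identifying the owner of the job
 *
 * Allocate a fence from the slab cache and initialize it on the
 * entity's fence context with the next sequence number.
 * Returns the new fence, or NULL if the allocation fails.
 */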
struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *entity,
					       void *owner)
{
	struct amd_sched_fence *fence = NULL;
	unsigned seq;

	fence = kmem_cache_zalloc(sched_fence_slab, GFP_KERNEL);
	if (fence == NULL)
		return NULL;

	INIT_LIST_HEAD(&fence->scheduled_cb);
	fence->owner = owner;
	fence->sched = entity->sched;
	spin_lock_init(&fence->lock);

	seq = atomic_inc_return(&entity->fence_seq);
	fence_init(&fence->base, &amd_sched_fence_ops, &fence->lock,
		   entity->fence_context, seq);

	return fence;
}

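/**
 * amd_sched_fence_signal - signal the fence
 *
 * @fence: scheduler fence to signal
 *
 * Signal the base fence and trace whether it was already signaled.
 */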
void amd_sched_fence_signal(struct amd_sched_fence *fence)
{
	int ret = fence_signal(&fence->base);

	if (!ret)
		FENCE_TRACE(&fence->base, "signaled from irq context\n");
	else
		FENCE_TRACE(&fence->base, "was already signaled\n");
}

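/**
 * amd_sched_job_pre_schedule - book keeping before pushing a job to the ring
 *
 * @sched: scheduler instance the job belongs to
 * @s_job: job to track
 *
 * Add the job to the scheduler's mirror list and run the driver's
 * begin_job callback, all under the job list lock.
 */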
void amd_sched_job_pre_schedule(struct amd_gpu_scheduler *sched,
				struct amd_sched_job *s_job)
{
	unsigned long flags;

	spin_lock_irqsave(&sched->job_list_lock, flags);
	list_add_tail(&s_job->node, &sched->ring_mirror_list);
	sched->ops->begin_job(s_job);
	spin_unlock_irqrestore(&sched->job_list_lock, flags);
}

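/**
 * amd_sched_fence_scheduled - mark the fence as scheduled
 *
 * @s_fence: scheduler fence
 *
 * Set the SCHEDULED bit and run all callbacks queued on the
 * scheduled_cb list, removing each entry before calling it.
 */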
void amd_sched_fence_scheduled(struct amd_sched_fence *s_fence)
{
	struct fence_cb *cur, *tmp;

	set_bit(AMD_SCHED_FENCE_SCHEDULED_BIT, &s_fence->base.flags);
	list_for_each_entry_safe(cur, tmp, &s_fence->scheduled_cb, node) {
		list_del_init(&cur->node);
		cur->func(&s_fence->base, cur);
	}
}

static const char *amd_sched_fence_get_driver_name(struct fence *fence)
{
	return "amd_sched";
}

static const char *amd_sched_fence_get_timeline_name(struct fence *f)
{
	struct amd_sched_fence *fence = to_amd_sched_fence(f);

	return (const char *)fence->sched->name;
}

static bool amd_sched_fence_enable_signaling(struct fence *f)
{
	return true;
}

/**
 * amd_sched_fence_free - free up the fence memory
 *
 * @rcu: RCU callback head
 *
 * Free up the fence memory after the RCU grace period.
 */
static void amd_sched_fence_free(struct rcu_head *rcu)
{
	struct fence *f = container_of(rcu, struct fence, rcu);
	struct amd_sched_fence *fence = to_amd_sched_fence(f);

	kmem_cache_free(sched_fence_slab, fence);
}

/**
 * amd_sched_fence_release - callback that fence can be freed
 *
 * @f: fence
 *
 * This function is called when the reference count becomes zero.
 * It just RCU schedules freeing up the fence.
 */
static void amd_sched_fence_release(struct fence *f)
{
	call_rcu(&f->rcu, amd_sched_fence_free);
}

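/* Operations plugging the scheduler fence into the common fence framework */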
const struct fence_ops amd_sched_fence_ops = {
	.get_driver_name = amd_sched_fence_get_driver_name,
	.get_timeline_name = amd_sched_fence_get_timeline_name,
	.enable_signaling = amd_sched_fence_enable_signaling,
	.signaled = NULL,
	.wait = fence_default_wait,
	.release = amd_sched_fence_release,
};