blob: 58408da122c5b6251b12db85b20688bfa2a8ec5c [file] [log] [blame]
Chunming Zhouc1b69ed2015-07-21 13:45:14 +08001/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 *
23 */
24#include <linux/kthread.h>
25#include <linux/wait.h>
26#include <linux/sched.h>
27#include <drm/drmP.h>
28#include "amdgpu.h"
29
Junwei Zhang4c7eb912015-09-09 09:05:55 +080030static struct fence *amdgpu_sched_dependency(struct amd_sched_job *sched_job)
Christian Könige61235d2015-08-25 11:05:36 +020031{
Junwei Zhanga6db8a32015-09-09 09:21:19 +080032 struct amdgpu_job *job = to_amdgpu_job(sched_job);
Junwei Zhang4c7eb912015-09-09 09:05:55 +080033 return amdgpu_sync_get_fence(&job->ibs->sync);
Christian Könige61235d2015-08-25 11:05:36 +020034}
35
/*
 * Scheduler back-end hook: submit the job's IBs to the hardware ring.
 *
 * Consumes the job: on both success and failure paths the job's free_job
 * callback (if any) is invoked and the job structure itself is kfree'd,
 * so callers must not touch @sched_job afterwards.
 *
 * Returns a reference to the fence of the last IB (caller owns the
 * reference), or NULL on error / NULL input.
 */
static struct fence *amdgpu_sched_run_job(struct amd_sched_job *sched_job)
{
	struct amdgpu_fence *fence = NULL;
	struct amdgpu_job *job;
	int r;

	if (!sched_job) {
		DRM_ERROR("job is null\n");
		return NULL;
	}
	job = to_amdgpu_job(sched_job);
	/* job_lock serializes against the submitter, which publishes the job
	 * while holding this lock (see amdgpu_sched_ib_submit_kernel_helper). */
	mutex_lock(&job->job_lock);
	r = amdgpu_ib_schedule(job->adev,
			       job->num_ibs,
			       job->ibs,
			       job->base.owner);
	if (r) {
		DRM_ERROR("Error scheduling IBs (%d)\n", r);
		goto err;
	}

	/* Take our own reference on the last IB's fence before free_job may
	 * release the IB array below. */
	fence = amdgpu_fence_ref(job->ibs[job->num_ibs - 1].fence);

err:
	if (job->free_job)
		job->free_job(job);

	/* NOTE(review): the mutex is unlocked and then the structure that
	 * embeds it is freed; this relies on no other thread still blocking
	 * on job_lock at this point — verify against the submission path. */
	mutex_unlock(&job->job_lock);
	/* Drop the submitter's reference on the scheduler fence. */
	fence_put(&job->base.s_fence->base);
	kfree(job);
	return fence ? &fence->base : NULL;
}
68
/* Back-end operations handed to the amd GPU scheduler core: dependency
 * resolution and actual hardware submission for amdgpu jobs. */
struct amd_sched_backend_ops amdgpu_sched_ops = {
	.dependency = amdgpu_sched_dependency,
	.run_job = amdgpu_sched_run_job,
};
73
Chunming Zhou3c704e92015-07-29 10:33:14 +080074int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
75 struct amdgpu_ring *ring,
76 struct amdgpu_ib *ibs,
77 unsigned num_ibs,
Chunming Zhoubb977d32015-08-18 15:16:40 +080078 int (*free_job)(struct amdgpu_job *),
Chunming Zhou17635522015-08-03 11:43:19 +080079 void *owner,
80 struct fence **f)
Chunming Zhou3c704e92015-07-29 10:33:14 +080081{
82 int r = 0;
83 if (amdgpu_enable_scheduler) {
Chunming Zhoubb977d32015-08-18 15:16:40 +080084 struct amdgpu_job *job =
85 kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
86 if (!job)
Chunming Zhou3c704e92015-07-29 10:33:14 +080087 return -ENOMEM;
Chunming Zhoubb977d32015-08-18 15:16:40 +080088 job->base.sched = ring->scheduler;
89 job->base.s_entity = &adev->kernel_ctx.rings[ring->idx].entity;
90 job->adev = adev;
91 job->ibs = ibs;
92 job->num_ibs = num_ibs;
Chunming Zhou84f76ea2015-08-24 12:47:36 +080093 job->base.owner = owner;
Chunming Zhoubb977d32015-08-18 15:16:40 +080094 mutex_init(&job->job_lock);
95 job->free_job = free_job;
96 mutex_lock(&job->job_lock);
Junwei Zhanga6db8a32015-09-09 09:21:19 +080097 r = amd_sched_entity_push_job(&job->base);
Chunming Zhouf556cb0c2015-08-02 11:18:04 +080098 if (r) {
Chunming Zhoubb977d32015-08-18 15:16:40 +080099 mutex_unlock(&job->job_lock);
100 kfree(job);
Chunming Zhouf556cb0c2015-08-02 11:18:04 +0800101 return r;
102 }
Chunming Zhoubb977d32015-08-18 15:16:40 +0800103 *f = fence_get(&job->base.s_fence->base);
104 mutex_unlock(&job->job_lock);
Chunming Zhouf556cb0c2015-08-02 11:18:04 +0800105 } else {
Chunming Zhou4af9f072015-08-03 12:57:31 +0800106 r = amdgpu_ib_schedule(adev, num_ibs, ibs, owner);
Chunming Zhouf556cb0c2015-08-02 11:18:04 +0800107 if (r)
108 return r;
Chunming Zhou281b4222015-08-12 12:58:31 +0800109 *f = fence_get(&ibs[num_ibs - 1].fence->base);
Chunming Zhouf556cb0c2015-08-02 11:18:04 +0800110 }
Chunming Zhou3c623382015-08-20 18:33:59 +0800111
Chunming Zhou17635522015-08-03 11:43:19 +0800112 return 0;
Chunming Zhou3c704e92015-07-29 10:33:14 +0800113}