/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <drm/drmP.h>
#include "gpu_scheduler.h"

/* Initialize a given run queue struct */
static void amd_sched_rq_init(struct amd_sched_rq *rq)
{
        spin_lock_init(&rq->lock);
        INIT_LIST_HEAD(&rq->entities);
        rq->current_entity = NULL;
}

static void amd_sched_rq_add_entity(struct amd_sched_rq *rq,
                                    struct amd_sched_entity *entity)
{
        spin_lock(&rq->lock);
        list_add_tail(&entity->list, &rq->entities);
        spin_unlock(&rq->lock);
}

static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
                                       struct amd_sched_entity *entity)
{
        spin_lock(&rq->lock);
        list_del_init(&entity->list);
        if (rq->current_entity == entity)
                rq->current_entity = NULL;
        spin_unlock(&rq->lock);
}

/**
 * Select the next entity from a specified run queue with round robin policy.
 * It may return the same entity as the current one if that is the only
 * available entity in the queue. Returns NULL if nothing is available.
 */
static struct amd_sched_entity *
amd_sched_rq_select_entity(struct amd_sched_rq *rq)
{
        struct amd_sched_entity *entity;

        spin_lock(&rq->lock);

        entity = rq->current_entity;
        if (entity) {
                list_for_each_entry_continue(entity, &rq->entities, list) {
                        if (!kfifo_is_empty(&entity->job_queue)) {
                                rq->current_entity = entity;
                                spin_unlock(&rq->lock);
                                return rq->current_entity;
                        }
                }
        }

        list_for_each_entry(entity, &rq->entities, list) {

                if (!kfifo_is_empty(&entity->job_queue)) {
                        rq->current_entity = entity;
                        spin_unlock(&rq->lock);
                        return rq->current_entity;
                }

                if (entity == rq->current_entity)
                        break;
        }

        spin_unlock(&rq->lock);

        return NULL;
}

/**
 * Note: This function should only be called from the scheduler main
 * thread for thread safety; there is no other protection here.
 * Return true if the scheduler has room to submit more jobs to the hardware.
 *
 * For active_hw_rq, there is only one producer (scheduler thread) and
 * one consumer (ISR). It should be safe to use this function in the
 * scheduler main thread to decide whether to continue emitting more IBs.
 */
static bool is_scheduler_ready(struct amd_gpu_scheduler *sched)
{
        unsigned long flags;
        bool full;

        spin_lock_irqsave(&sched->queue_lock, flags);
        full = atomic64_read(&sched->hw_rq_count) <
                sched->hw_submission_limit ? true : false;
        spin_unlock_irqrestore(&sched->queue_lock, flags);

        return full;
}

/**
 * Select next entity containing real IB submissions
 */
static struct amd_sched_entity *
select_context(struct amd_gpu_scheduler *sched)
{
        struct amd_sched_entity *wake_entity = NULL;
        struct amd_sched_entity *tmp;

        if (!is_scheduler_ready(sched))
                return NULL;

        /* Kernel run queue has higher priority than normal run queue */
        tmp = amd_sched_rq_select_entity(&sched->kernel_rq);
        if (tmp == NULL)
                tmp = amd_sched_rq_select_entity(&sched->sched_rq);

        if (sched->current_entity && (sched->current_entity != tmp))
                wake_entity = sched->current_entity;
        sched->current_entity = tmp;
        if (wake_entity && wake_entity->need_wakeup)
                wake_up(&wake_entity->wait_queue);
        return tmp;
}

/**
 * Initialize a context entity, used by the scheduler when submitting to a HW ring.
 *
 * @sched The pointer to the scheduler
 * @entity The pointer to a valid amd_sched_entity
 * @rq The run queue this entity belongs to
 * @jobs The max number of jobs in the job queue
 *
 * return 0 on success, a negative error code on failure
 */
int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
                          struct amd_sched_entity *entity,
                          struct amd_sched_rq *rq,
                          uint32_t jobs)
{
        char name[20];

        if (!(sched && entity && rq))
                return -EINVAL;

        memset(entity, 0, sizeof(struct amd_sched_entity));
        spin_lock_init(&entity->lock);
        entity->belongto_rq = rq;
        entity->scheduler = sched;
        init_waitqueue_head(&entity->wait_queue);
        init_waitqueue_head(&entity->wait_emit);
        entity->fence_context = fence_context_alloc(1);
        snprintf(name, sizeof(name), "c_entity[%llu]", entity->fence_context);
        memcpy(entity->name, name, 20);
        entity->need_wakeup = false;
        if (kfifo_alloc(&entity->job_queue,
                        jobs * sizeof(void *),
                        GFP_KERNEL))
                return -EINVAL;

        spin_lock_init(&entity->queue_lock);
        atomic_set(&entity->fence_seq, 0);

        /* Add the entity to the run queue */
        amd_sched_rq_add_entity(rq, entity);
        return 0;
}
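
/*
 * A minimal usage sketch (illustrative only, not part of this file): a
 * driver would typically embed an amd_sched_entity in its per-context
 * structure and initialize it against one of the scheduler's run queues.
 * The names my_ctx, my_ctx_init, ring and MY_JOB_QUEUE_DEPTH below are
 * hypothetical placeholders.
 *
 *      struct my_ctx {
 *              struct amd_sched_entity entity;
 *      };
 *
 *      int my_ctx_init(struct my_ctx *ctx, struct amd_gpu_scheduler *ring)
 *      {
 *              // Normal work goes to sched_rq; kernel work would use
 *              // &ring->kernel_rq instead.
 *              return amd_sched_entity_init(ring, &ctx->entity,
 *                                           &ring->sched_rq,
 *                                           MY_JOB_QUEUE_DEPTH);
 *      }
 *
 * Teardown mirrors this with amd_sched_entity_fini(ring, &ctx->entity),
 * which waits for the queued jobs to drain before removing the entity.
 */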

/**
 * Query if entity is initialized
 *
 * @sched Pointer to scheduler instance
 * @entity The pointer to a valid scheduler entity
 *
 * return true if entity is initialized, false otherwise
 */
static bool is_context_entity_initialized(struct amd_gpu_scheduler *sched,
                                          struct amd_sched_entity *entity)
{
        return entity->scheduler == sched &&
                entity->belongto_rq != NULL;
}

static bool is_context_entity_idle(struct amd_gpu_scheduler *sched,
                                   struct amd_sched_entity *entity)
{
        /**
         * Idle means no pending IBs, and the entity is not
         * currently being used.
         */
        barrier();
        if ((sched->current_entity != entity) &&
            kfifo_is_empty(&entity->job_queue))
                return true;

        return false;
}

/**
 * Destroy a context entity
 *
 * @sched Pointer to scheduler instance
 * @entity The pointer to a valid scheduler entity
 *
 * return 0 on success, a negative error code on failure
 */
int amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
                          struct amd_sched_entity *entity)
{
        int r = 0;
        struct amd_sched_rq *rq = entity->belongto_rq;

        if (!is_context_entity_initialized(sched, entity))
                return 0;
        entity->need_wakeup = true;
        /**
         * The client will not queue more IBs during this fini; consume the
         * existing queued IBs.
         */
        r = wait_event_timeout(
                entity->wait_queue,
                is_context_entity_idle(sched, entity),
                msecs_to_jiffies(AMD_GPU_WAIT_IDLE_TIMEOUT_IN_MS)
                ) ? 0 : -1;

        if (r) {
                if (entity->is_pending)
                        DRM_INFO("Entity %p is in waiting state during fini, all pending ibs will be canceled.\n",
                                 entity);
        }

        amd_sched_rq_remove_entity(rq, entity);
        kfifo_free(&entity->job_queue);
        return r;
}

/**
 * Submit a normal job to the job queue
 *
 * @sched_job The pointer to the job required to submit
 *
 * return 0 on success, -EINVAL if no fence could be allocated.
 * If the entity's job queue is full, this function blocks until a slot
 * becomes free.
 */
int amd_sched_push_job(struct amd_sched_job *sched_job)
{
        struct amd_sched_fence *fence =
                amd_sched_fence_create(sched_job->s_entity);
        if (!fence)
                return -EINVAL;
        fence_get(&fence->base);
        sched_job->s_fence = fence;
        while (kfifo_in_spinlocked(&sched_job->s_entity->job_queue,
                                   &sched_job, sizeof(void *),
                                   &sched_job->s_entity->queue_lock) !=
               sizeof(void *)) {
                /**
                 * The current context has used up all its IB slots;
                 * wait here, or check whether the GPU is hung.
                 */
                schedule();
        }
        /* the first job wakes up the scheduler */
        if ((kfifo_len(&sched_job->s_entity->job_queue) / sizeof(void *)) == 1)
                wake_up_interruptible(&sched_job->sched->wait_queue);
        return 0;
}
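
/*
 * Submission-flow sketch (illustrative; adev and my_alloc_job are
 * hypothetical driver-side names). The caller owns setting up the job's
 * scheduler and entity before pushing it:
 *
 *      struct amd_sched_job *job = my_alloc_job(adev);
 *
 *      job->sched    = entity->scheduler;
 *      job->s_entity = entity;
 *      r = amd_sched_push_job(job);    // blocks while the kfifo is full
 *
 * Once the scheduler thread picks the job up it calls ops->prepare_job(),
 * then ops->run_job(), and attaches amd_sched_process_job() as a fence
 * callback so hw_rq_count is decremented when the hardware signals
 * completion.
 */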

static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
{
        struct amd_sched_job *sched_job =
                container_of(cb, struct amd_sched_job, cb);
        struct amd_gpu_scheduler *sched;
        unsigned long flags;

        sched = sched_job->sched;
        amd_sched_fence_signal(sched_job->s_fence);
        spin_lock_irqsave(&sched->queue_lock, flags);
        list_del(&sched_job->list);
        atomic64_dec(&sched->hw_rq_count);
        spin_unlock_irqrestore(&sched->queue_lock, flags);
        fence_put(&sched_job->s_fence->base);
        sched->ops->process_job(sched, sched_job);
        wake_up_interruptible(&sched->wait_queue);
}

static int amd_sched_main(void *param)
{
        int r;
        struct amd_sched_job *job;
        struct sched_param sparam = {.sched_priority = 1};
        struct amd_sched_entity *c_entity = NULL;
        struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;

        sched_setscheduler(current, SCHED_FIFO, &sparam);

        while (!kthread_should_stop()) {
                struct fence *fence;

                wait_event_interruptible(sched->wait_queue,
                                         is_scheduler_ready(sched) &&
                                         (c_entity = select_context(sched)));
                r = kfifo_out(&c_entity->job_queue, &job, sizeof(void *));
                if (r != sizeof(void *))
                        continue;
                r = 0;
                if (sched->ops->prepare_job)
                        r = sched->ops->prepare_job(sched, c_entity, job);
                if (!r) {
                        unsigned long flags;
                        spin_lock_irqsave(&sched->queue_lock, flags);
                        list_add_tail(&job->list, &sched->active_hw_rq);
                        atomic64_inc(&sched->hw_rq_count);
                        spin_unlock_irqrestore(&sched->queue_lock, flags);
                }
                mutex_lock(&sched->sched_lock);
                fence = sched->ops->run_job(sched, c_entity, job);
                if (fence) {
                        r = fence_add_callback(fence, &job->cb,
                                               amd_sched_process_job);
                        if (r == -ENOENT)
                                amd_sched_process_job(fence, &job->cb);
                        else if (r)
                                DRM_ERROR("fence add callback failed (%d)\n", r);
                        fence_put(fence);
                }
                mutex_unlock(&sched->sched_lock);
        }
        return 0;
}

/**
 * Create a gpu scheduler
 *
 * @device The device context for this scheduler
 * @ops The backend operations for this scheduler.
 * @ring The scheduler is per ring; this is the ring id.
 * @granularity The minimum time unit, in ms, at which the scheduler schedules.
 * @preemption Indicates whether this ring supports preemption; 0 means no.
 * @hw_submission The max number of jobs in flight on the HW ring.
 *
 * return the pointer to the scheduler on success, otherwise return NULL
 */
struct amd_gpu_scheduler *amd_sched_create(void *device,
                                           struct amd_sched_backend_ops *ops,
                                           unsigned ring,
                                           unsigned granularity,
                                           unsigned preemption,
                                           unsigned hw_submission)
{
        struct amd_gpu_scheduler *sched;
        char name[20];

        sched = kzalloc(sizeof(struct amd_gpu_scheduler), GFP_KERNEL);
        if (!sched)
                return NULL;

        sched->device = device;
        sched->ops = ops;
        sched->granularity = granularity;
        sched->ring_id = ring;
        sched->preemption = preemption;
        sched->hw_submission_limit = hw_submission;
        snprintf(name, sizeof(name), "gpu_sched[%d]", ring);
        mutex_init(&sched->sched_lock);
        spin_lock_init(&sched->queue_lock);
        amd_sched_rq_init(&sched->sched_rq);
        amd_sched_rq_init(&sched->kernel_rq);

        init_waitqueue_head(&sched->wait_queue);
        INIT_LIST_HEAD(&sched->active_hw_rq);
        atomic64_set(&sched->hw_rq_count, 0);
        /* Each scheduler will run on a separate kernel thread */
        sched->thread = kthread_create(amd_sched_main, sched, name);
        if (sched->thread) {
                wake_up_process(sched->thread);
                return sched;
        }

        DRM_ERROR("Failed to create scheduler for id %d.\n", ring);
        kfree(sched);
        return NULL;
}
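
/*
 * Creation sketch (illustrative; adev, my_ring_ops, ring_id, MY_GRANULARITY
 * and MY_HW_SUBMISSION are hypothetical driver-side names):
 *
 *      struct amd_gpu_scheduler *sched;
 *
 *      sched = amd_sched_create(adev, &my_ring_ops, ring_id,
 *                               MY_GRANULARITY, 0, MY_HW_SUBMISSION);
 *      if (!sched)
 *              return -ENOMEM;
 *
 * The backend ops supply prepare_job/run_job/process_job; hw_submission
 * bounds how many jobs the scheduler keeps in flight on the ring at once.
 * amd_sched_destroy(sched) below stops the kernel thread and frees the
 * scheduler when the ring is torn down.
 */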

/**
 * Destroy a gpu scheduler
 *
 * @sched The pointer to the scheduler
 *
 * return 0 on success
 */
int amd_sched_destroy(struct amd_gpu_scheduler *sched)
{
        kthread_stop(sched->thread);
        kfree(sched);
        return 0;
}