/* sched.c - SPU scheduler.
 *
 * Copyright (C) IBM 2005
 * Author: Mark Nutter <mnutter@us.ibm.com>
 *
 * 2006-03-31	NUMA domains added.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/completion.h>
#include <linux/vmalloc.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/numa.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/pid_namespace.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/marker.h>

#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/spu.h>
#include <asm/spu_csa.h>
#include <asm/spu_priv1.h>
#include "spufs.h"

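/*
 * The runqueue below follows the classic O(1) scheduler design: one list
 * per priority level plus a bitmap of the non-empty levels, so picking the
 * next context is a find_first_bit() followed by taking the head of that
 * list (see grab_runnable_context()).  A simplified sketch of the lookup,
 * assuming runq_lock is held:
 *
 *	best = find_first_bit(spu_prio->bitmap, MAX_PRIO);
 *	if (best < MAX_PRIO)
 *		ctx = list_first_entry(&spu_prio->runq[best],
 *				       struct spu_context, rq);
 */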
struct spu_prio_array {
	DECLARE_BITMAP(bitmap, MAX_PRIO);
	struct list_head runq[MAX_PRIO];
	spinlock_t runq_lock;
	int nr_waiting;
};

static unsigned long spu_avenrun[3];
static struct spu_prio_array *spu_prio;
static struct task_struct *spusched_task;
static struct timer_list spusched_timer;
static struct timer_list spuloadavg_timer;

/*
 * Priority of a normal, non-rt, non-niced process (aka nice level 0).
 */
#define NORMAL_PRIO		120

/*
 * Frequency of the spu scheduler tick.  By default we do one SPU scheduler
 * tick for every 10 CPU scheduler ticks.
 */
#define SPUSCHED_TICK		(10)

/*
 * These are the 'tuning knobs' of the scheduler:
 *
 * Minimum timeslice is 5 msecs (or 1 spu scheduler tick, whichever is
 * larger), default timeslice is 100 msecs, maximum timeslice is 800 msecs.
 */
#define MIN_SPU_TIMESLICE	max(5 * HZ / (1000 * SPUSCHED_TICK), 1)
#define DEF_SPU_TIMESLICE	(100 * HZ / (1000 * SPUSCHED_TICK))

#define MAX_USER_PRIO		(MAX_PRIO - MAX_RT_PRIO)
#define SCALE_PRIO(x, prio) \
	max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_SPU_TIMESLICE)

/*
 * Scale user-nice values [ -20 ... 0 ... 19 ] to time slice values:
 * [800ms ... 100ms ... 5ms]
 *
 * The higher a thread's priority, the bigger timeslices
 * it gets during one round of execution.  But even the lowest
 * priority thread gets MIN_SPU_TIMESLICE worth of execution time.
 */
void spu_set_timeslice(struct spu_context *ctx)
{
	if (ctx->prio < NORMAL_PRIO)
		ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE * 4, ctx->prio);
	else
		ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE, ctx->prio);
}
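
/*
 * Worked example of the scaling above, assuming HZ=1000 and the usual
 * MAX_PRIO=140 / MAX_RT_PRIO=100 (so MAX_USER_PRIO / 2 == 20):
 *
 *	MIN_SPU_TIMESLICE = max(5000 / 10000, 1) = 1 tick   (10ms)
 *	DEF_SPU_TIMESLICE = 100000 / 10000       = 10 ticks (100ms)
 *
 *	nice -20 (prio 100): SCALE_PRIO(40, 100) = 40 * 40 / 20 = 80 ticks (800ms)
 *	nice   0 (prio 120): SCALE_PRIO(10, 120) = 10 * 20 / 20 = 10 ticks (100ms)
 *	nice +19 (prio 139): SCALE_PRIO(10, 139) = max(0, 1)    =  1 tick  (10ms)
 *
 * One spu scheduler tick is SPUSCHED_TICK (10) jiffies, i.e. 10ms at HZ=1000.
 */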

/*
 * Update scheduling information from the owning thread.
 */
void __spu_update_sched_info(struct spu_context *ctx)
{
	/*
	 * assert that the context is not on the runqueue, so it is safe
	 * to change its scheduling parameters.
	 */
	BUG_ON(!list_empty(&ctx->rq));

	/*
	 * 32-bit assignments are atomic on powerpc, and we don't care about
	 * memory ordering here because retrieving the controlling thread is
	 * by definition racy.
	 */
	ctx->tid = current->pid;

	/*
	 * We do our own priority calculations, so we normally want
	 * ->static_prio to start with.  Unfortunately this field
	 * contains junk for threads with a realtime scheduling
	 * policy so we have to look at ->prio in this case.
	 */
	if (rt_prio(current->prio))
		ctx->prio = current->prio;
	else
		ctx->prio = current->static_prio;
	ctx->policy = current->policy;

	/*
	 * TODO: the context may be loaded, so we may need to activate
	 * it again on a different node.  But it shouldn't hurt anything
	 * to update its parameters, because we know that the scheduler
	 * is not actively looking at this field, since it is not on the
	 * runqueue.  The context will be rescheduled on the proper node
	 * if it is timesliced or preempted.
	 */
	ctx->cpus_allowed = current->cpus_allowed;

	/* Save the current cpu id for spu interrupt routing. */
	ctx->last_ran = raw_smp_processor_id();
}

void spu_update_sched_info(struct spu_context *ctx)
{
	int node;

	if (ctx->state == SPU_STATE_RUNNABLE) {
		node = ctx->spu->node;

		/*
		 * Take list_mutex to sync with find_victim().
		 */
		mutex_lock(&cbe_spu_info[node].list_mutex);
		__spu_update_sched_info(ctx);
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	} else {
		__spu_update_sched_info(ctx);
	}
}

static int __node_allowed(struct spu_context *ctx, int node)
{
	if (nr_cpus_node(node)) {
		cpumask_t mask = node_to_cpumask(node);

		if (cpus_intersects(mask, ctx->cpus_allowed))
			return 1;
	}

	return 0;
}

static int node_allowed(struct spu_context *ctx, int node)
{
	int rval;

	spin_lock(&spu_prio->runq_lock);
	rval = __node_allowed(ctx, node);
	spin_unlock(&spu_prio->runq_lock);

	return rval;
}

void do_notify_spus_active(void)
{
	int node;

	/*
	 * Wake up the active spu_contexts.
	 *
	 * When the awakened processes see their "notify_active" flag is set,
	 * they will call spu_switch_notify().
	 */
	for_each_online_node(node) {
		struct spu *spu;

		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			if (spu->alloc_state != SPU_FREE) {
				struct spu_context *ctx = spu->ctx;
				set_bit(SPU_SCHED_NOTIFY_ACTIVE,
					&ctx->sched_flags);
				mb();
				wake_up_all(&ctx->stop_wq);
			}
		}
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	}
}

/**
 * spu_bind_context - bind spu context to physical spu
 * @spu:	physical spu to bind to
 * @ctx:	context to bind
 */
static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
{
	spu_context_trace(spu_bind_context__enter, ctx, spu);

	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

	if (ctx->flags & SPU_CREATE_NOSCHED)
		atomic_inc(&cbe_spu_info[spu->node].reserved_spus);

	ctx->stats.slb_flt_base = spu->stats.slb_flt;
	ctx->stats.class2_intr_base = spu->stats.class2_intr;

	spu_associate_mm(spu, ctx->owner);

	spin_lock_irq(&spu->register_lock);
	spu->ctx = ctx;
	spu->flags = 0;
	ctx->spu = spu;
	ctx->ops = &spu_hw_ops;
	spu->pid = current->pid;
	spu->tgid = current->tgid;
	spu->ibox_callback = spufs_ibox_callback;
	spu->wbox_callback = spufs_wbox_callback;
	spu->stop_callback = spufs_stop_callback;
	spu->mfc_callback = spufs_mfc_callback;
	spin_unlock_irq(&spu->register_lock);

	spu_unmap_mappings(ctx);

	spu_switch_log_notify(spu, ctx, SWITCH_LOG_START, 0);
	spu_restore(&ctx->csa, spu);
	spu->timestamp = jiffies;
	spu_switch_notify(spu, ctx);
	ctx->state = SPU_STATE_RUNNABLE;

	spuctx_switch_state(ctx, SPU_UTIL_USER);
}

/*
 * Must be used with the list_mutex held.
 */
static inline int sched_spu(struct spu *spu)
{
	BUG_ON(!mutex_is_locked(&cbe_spu_info[spu->node].list_mutex));

	return (!spu->ctx || !(spu->ctx->flags & SPU_CREATE_NOSCHED));
}

static void aff_merge_remaining_ctxs(struct spu_gang *gang)
{
	struct spu_context *ctx;

	list_for_each_entry(ctx, &gang->aff_list_head, aff_list) {
		if (list_empty(&ctx->aff_list))
			list_add(&ctx->aff_list, &gang->aff_list_head);
	}
	gang->aff_flags |= AFF_MERGED;
}

static void aff_set_offsets(struct spu_gang *gang)
{
	struct spu_context *ctx;
	int offset;

	offset = -1;
	list_for_each_entry_reverse(ctx, &gang->aff_ref_ctx->aff_list,
								aff_list) {
		if (&ctx->aff_list == &gang->aff_list_head)
			break;
		ctx->aff_offset = offset--;
	}

	offset = 0;
	list_for_each_entry(ctx, gang->aff_ref_ctx->aff_list.prev, aff_list) {
		if (&ctx->aff_list == &gang->aff_list_head)
			break;
		ctx->aff_offset = offset++;
	}

	gang->aff_flags |= AFF_OFFSETS_SET;
}
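
/*
 * For illustration: if a gang's aff_list holds { A, B, C, D } with B as
 * the aff_ref_ctx, the loops above give B an aff_offset of 0, the
 * contexts after it (C, D) offsets 1 and 2, and the context before it
 * (A) offset -1.  ctx_location() later turns these offsets into physical
 * placement relative to the gang's reference spu.
 */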

static struct spu *aff_ref_location(struct spu_context *ctx, int mem_aff,
		int group_size, int lowest_offset)
{
	struct spu *spu;
	int node, n;

	/*
	 * TODO: A better algorithm could be used to find a good spu to be
	 * used as reference location for the ctxs chain.
	 */
	node = cpu_to_node(raw_smp_processor_id());
	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		int available_spus;

		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(ctx, node))
			continue;

		available_spus = 0;
		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			if (spu->ctx && spu->ctx->gang
					&& spu->ctx->aff_offset == 0)
				available_spus -=
					(spu->ctx->gang->contexts - 1);
			else
				available_spus++;
		}
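
		/*
		 * Gangs with affinity occupy a contiguous block of spus;
		 * a loaded gang reference context (aff_offset == 0) stands
		 * in for its whole gang in the count above.  Only consider
		 * this node if the gang fits entirely.
		 */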
		if (available_spus < ctx->gang->contexts) {
			mutex_unlock(&cbe_spu_info[node].list_mutex);
			continue;
		}

		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			if ((!mem_aff || spu->has_mem_affinity) &&
							sched_spu(spu)) {
				mutex_unlock(&cbe_spu_info[node].list_mutex);
				return spu;
			}
		}
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	}
	return NULL;
}

static void aff_set_ref_point_location(struct spu_gang *gang)
{
	int mem_aff, gs, lowest_offset;
	struct spu_context *ctx;
	struct spu *tmp;

	mem_aff = gang->aff_ref_ctx->flags & SPU_CREATE_AFFINITY_MEM;
	lowest_offset = 0;
	gs = 0;

	list_for_each_entry(tmp, &gang->aff_list_head, aff_list)
		gs++;

	list_for_each_entry_reverse(ctx, &gang->aff_ref_ctx->aff_list,
								aff_list) {
		if (&ctx->aff_list == &gang->aff_list_head)
			break;
		lowest_offset = ctx->aff_offset;
	}

	gang->aff_ref_spu = aff_ref_location(gang->aff_ref_ctx, mem_aff, gs,
							lowest_offset);
}

static struct spu *ctx_location(struct spu *ref, int offset, int node)
{
	struct spu *spu;

	spu = NULL;
	if (offset >= 0) {
		list_for_each_entry(spu, ref->aff_list.prev, aff_list) {
			BUG_ON(spu->node != node);
			if (offset == 0)
				break;
			if (sched_spu(spu))
				offset--;
		}
	} else {
		list_for_each_entry_reverse(spu, ref->aff_list.next, aff_list) {
			BUG_ON(spu->node != node);
			if (offset == 0)
				break;
			if (sched_spu(spu))
				offset++;
		}
	}

	return spu;
}

/*
 * has_affinity is called each time a context is going to be scheduled.
 * It returns true if the context's gang has an affinity reference spu,
 * computing that reference location first if necessary.
 */
static int has_affinity(struct spu_context *ctx)
{
	struct spu_gang *gang = ctx->gang;

	if (list_empty(&ctx->aff_list))
		return 0;

	if (atomic_read(&ctx->gang->aff_sched_count) == 0)
		ctx->gang->aff_ref_spu = NULL;

	if (!gang->aff_ref_spu) {
		if (!(gang->aff_flags & AFF_MERGED))
			aff_merge_remaining_ctxs(gang);
		if (!(gang->aff_flags & AFF_OFFSETS_SET))
			aff_set_offsets(gang);
		aff_set_ref_point_location(gang);
	}

	return gang->aff_ref_spu != NULL;
}

/**
 * spu_unbind_context - unbind spu context from physical spu
 * @spu:	physical spu to unbind from
 * @ctx:	context to unbind
 */
static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
{
	u32 status;

	spu_context_trace(spu_unbind_context__enter, ctx, spu);

	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

	if (spu->ctx->flags & SPU_CREATE_NOSCHED)
		atomic_dec(&cbe_spu_info[spu->node].reserved_spus);

	if (ctx->gang)
		atomic_dec_if_positive(&ctx->gang->aff_sched_count);

	spu_switch_notify(spu, NULL);
	spu_unmap_mappings(ctx);
	spu_save(&ctx->csa, spu);
	spu_switch_log_notify(spu, ctx, SWITCH_LOG_STOP, 0);

	spin_lock_irq(&spu->register_lock);
	spu->timestamp = jiffies;
	ctx->state = SPU_STATE_SAVED;
	spu->ibox_callback = NULL;
	spu->wbox_callback = NULL;
	spu->stop_callback = NULL;
	spu->mfc_callback = NULL;
	spu->pid = 0;
	spu->tgid = 0;
	ctx->ops = &spu_backing_ops;
	spu->flags = 0;
	spu->ctx = NULL;
	spin_unlock_irq(&spu->register_lock);

	spu_associate_mm(spu, NULL);

	ctx->stats.slb_flt +=
		(spu->stats.slb_flt - ctx->stats.slb_flt_base);
	ctx->stats.class2_intr +=
		(spu->stats.class2_intr - ctx->stats.class2_intr_base);

	/* This maps the underlying spu state to idle */
	spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
	ctx->spu = NULL;

	if (spu_stopped(ctx, &status))
		wake_up_all(&ctx->stop_wq);
}

/**
 * spu_add_to_rq - add a context to the runqueue
 * @ctx:	context to add
 */
static void __spu_add_to_rq(struct spu_context *ctx)
{
	/*
	 * Unfortunately this code path can be called from multiple threads
	 * on behalf of a single context due to the way the problem state
	 * mmap support works.
	 *
	 * Fortunately we need to wake up all these threads at the same time
	 * and can simply skip the runqueue addition for all but the first
	 * thread getting into this codepath.
	 *
	 * It's still quite hacky, and long-term we should proxy all other
	 * threads through the owner thread so that spu_run is in control
	 * of all the scheduling activity for a given context.
	 */
	if (list_empty(&ctx->rq)) {
		list_add_tail(&ctx->rq, &spu_prio->runq[ctx->prio]);
		set_bit(ctx->prio, spu_prio->bitmap);
		if (!spu_prio->nr_waiting++)
			__mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
	}
}
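
/*
 * The scheduler tick timer only runs while at least one context is
 * waiting: it is armed on the 0 -> 1 transition of nr_waiting above and
 * cancelled on the 1 -> 0 transition in __spu_del_from_rq() below, so an
 * idle system takes no spusched timer ticks at all.
 */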

static void spu_add_to_rq(struct spu_context *ctx)
{
	spin_lock(&spu_prio->runq_lock);
	__spu_add_to_rq(ctx);
	spin_unlock(&spu_prio->runq_lock);
}

static void __spu_del_from_rq(struct spu_context *ctx)
{
	int prio = ctx->prio;

	if (!list_empty(&ctx->rq)) {
		if (!--spu_prio->nr_waiting)
			del_timer(&spusched_timer);
		list_del_init(&ctx->rq);

		if (list_empty(&spu_prio->runq[prio]))
			clear_bit(prio, spu_prio->bitmap);
	}
}

void spu_del_from_rq(struct spu_context *ctx)
{
	spin_lock(&spu_prio->runq_lock);
	__spu_del_from_rq(ctx);
	spin_unlock(&spu_prio->runq_lock);
}

static void spu_prio_wait(struct spu_context *ctx)
{
	DEFINE_WAIT(wait);

	/*
	 * The caller must explicitly wait for a context to be loaded
	 * if the nosched flag is set.  If NOSCHED is not set, the caller
	 * queues the context and waits for an spu event or error.
	 */
	BUG_ON(!(ctx->flags & SPU_CREATE_NOSCHED));

	spin_lock(&spu_prio->runq_lock);
	prepare_to_wait_exclusive(&ctx->stop_wq, &wait, TASK_INTERRUPTIBLE);
	if (!signal_pending(current)) {
		__spu_add_to_rq(ctx);
		spin_unlock(&spu_prio->runq_lock);
		mutex_unlock(&ctx->state_mutex);
		schedule();
		mutex_lock(&ctx->state_mutex);
		spin_lock(&spu_prio->runq_lock);
		__spu_del_from_rq(ctx);
	}
	spin_unlock(&spu_prio->runq_lock);
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&ctx->stop_wq, &wait);
}

static struct spu *spu_get_idle(struct spu_context *ctx)
{
	struct spu *spu, *aff_ref_spu;
	int node, n;

	spu_context_nospu_trace(spu_get_idle__enter, ctx);

	if (ctx->gang) {
		mutex_lock(&ctx->gang->aff_mutex);
		if (has_affinity(ctx)) {
			aff_ref_spu = ctx->gang->aff_ref_spu;
			atomic_inc(&ctx->gang->aff_sched_count);
			mutex_unlock(&ctx->gang->aff_mutex);
			node = aff_ref_spu->node;

			mutex_lock(&cbe_spu_info[node].list_mutex);
			spu = ctx_location(aff_ref_spu, ctx->aff_offset, node);
			if (spu && spu->alloc_state == SPU_FREE)
				goto found;
			mutex_unlock(&cbe_spu_info[node].list_mutex);

			atomic_dec(&ctx->gang->aff_sched_count);
			goto not_found;
		}
		mutex_unlock(&ctx->gang->aff_mutex);
	}
	node = cpu_to_node(raw_smp_processor_id());
	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(ctx, node))
			continue;

		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			if (spu->alloc_state == SPU_FREE)
				goto found;
		}
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	}

 not_found:
	spu_context_nospu_trace(spu_get_idle__not_found, ctx);
	return NULL;

 found:
	spu->alloc_state = SPU_USED;
	mutex_unlock(&cbe_spu_info[node].list_mutex);
	spu_context_trace(spu_get_idle__found, ctx, spu);
	spu_init_channels(spu);
	return spu;
}

/**
 * find_victim - find a lower priority context to preempt
 * @ctx: candidate context for running
 *
 * Returns the freed physical spu to run the new context on.
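 *
 * Note that "lower priority" means a numerically larger ->prio value.
 * For example, an RT candidate at prio 50 will preempt a nice-0 context
 * (prio 120) in preference to a nice -5 one (prio 115): the scan below
 * keeps the candidate victim with the largest prio value.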
 */
static struct spu *find_victim(struct spu_context *ctx)
{
	struct spu_context *victim = NULL;
	struct spu *spu;
	int node, n;

	spu_context_nospu_trace(spu_find_victim__enter, ctx);

	/*
	 * Look for a possible preemption candidate on the local node first.
	 * If there is no candidate look at the other nodes.  This isn't
	 * exactly fair, but so far the whole spu scheduler tries to keep
	 * a strong node affinity.  We might want to fine-tune this in
	 * the future.
	 */
 restart:
	node = cpu_to_node(raw_smp_processor_id());
	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(ctx, node))
			continue;

		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			struct spu_context *tmp = spu->ctx;

			if (tmp && tmp->prio > ctx->prio &&
			    !(tmp->flags & SPU_CREATE_NOSCHED) &&
			    (!victim || tmp->prio > victim->prio)) {
				victim = spu->ctx;
				get_spu_context(victim);
			}
		}
		mutex_unlock(&cbe_spu_info[node].list_mutex);

		if (victim) {
			/*
			 * This nests ctx->state_mutex, but we always lock
			 * higher priority contexts before lower priority
			 * ones, so this is safe until we introduce
			 * priority inheritance schemes.
			 *
			 * XXX if the highest priority context is locked,
			 * this can loop a long time.  Might be better to
			 * look at another context or give up after X retries.
			 */
			if (!mutex_trylock(&victim->state_mutex)) {
				put_spu_context(victim);
				victim = NULL;
				goto restart;
			}

			spu = victim->spu;
			if (!spu || victim->prio <= ctx->prio) {
				/*
				 * This race can happen because we've dropped
				 * the active list mutex.  Not a problem, just
				 * restart the search.
				 */
				mutex_unlock(&victim->state_mutex);
				put_spu_context(victim);
				victim = NULL;
				goto restart;
			}

			spu_context_trace(__spu_deactivate__unload, ctx, spu);

			mutex_lock(&cbe_spu_info[node].list_mutex);
			cbe_spu_info[node].nr_active--;
			spu_unbind_context(spu, victim);
			mutex_unlock(&cbe_spu_info[node].list_mutex);

			victim->stats.invol_ctx_switch++;
			spu->stats.invol_ctx_switch++;
			if (test_bit(SPU_SCHED_SPU_RUN, &victim->sched_flags))
				spu_add_to_rq(victim);

			mutex_unlock(&victim->state_mutex);
			put_spu_context(victim);

			return spu;
		}
	}

	return NULL;
}

static void __spu_schedule(struct spu *spu, struct spu_context *ctx)
{
	int node = spu->node;
	int success = 0;

	spu_set_timeslice(ctx);

	mutex_lock(&cbe_spu_info[node].list_mutex);
	if (spu->ctx == NULL) {
		spu_bind_context(spu, ctx);
		cbe_spu_info[node].nr_active++;
		spu->alloc_state = SPU_USED;
		success = 1;
	}
	mutex_unlock(&cbe_spu_info[node].list_mutex);

	if (success)
		wake_up_all(&ctx->run_wq);
	else
		spu_add_to_rq(ctx);
}

static void spu_schedule(struct spu *spu, struct spu_context *ctx)
{
	/*
	 * Not a candidate for interruptible because it's called either
	 * from the scheduler thread or from spu_deactivate.
	 */
	mutex_lock(&ctx->state_mutex);
	__spu_schedule(spu, ctx);
	spu_release(ctx);
}

static void spu_unschedule(struct spu *spu, struct spu_context *ctx)
{
	int node = spu->node;

	mutex_lock(&cbe_spu_info[node].list_mutex);
	cbe_spu_info[node].nr_active--;
	spu->alloc_state = SPU_FREE;
	spu_unbind_context(spu, ctx);
	ctx->stats.invol_ctx_switch++;
	spu->stats.invol_ctx_switch++;
	mutex_unlock(&cbe_spu_info[node].list_mutex);
}

/**
 * spu_activate - find a free spu for a context and execute it
 * @ctx:	spu context to schedule
 * @flags:	flags (currently ignored)
 *
 * Tries to find a free spu to run @ctx.  If no free spu is available
 * add the context to the runqueue so it gets woken up once an spu
 * is available.
 */
int spu_activate(struct spu_context *ctx, unsigned long flags)
{
	struct spu *spu;

	/*
	 * If there are multiple threads waiting for a single context
	 * only one actually binds the context while the others will
	 * only be able to acquire the state_mutex once the context
	 * already is in runnable state.
	 */
	if (ctx->spu)
		return 0;

spu_activate_top:
	if (signal_pending(current))
		return -ERESTARTSYS;

	spu = spu_get_idle(ctx);
	/*
	 * If this is a realtime thread we try to get it running by
	 * preempting a lower priority thread.
	 */
	if (!spu && rt_prio(ctx->prio))
		spu = find_victim(ctx);
	if (spu) {
		unsigned long runcntl;

		runcntl = ctx->ops->runcntl_read(ctx);
		__spu_schedule(spu, ctx);
		if (runcntl & SPU_RUNCNTL_RUNNABLE)
			spuctx_switch_state(ctx, SPU_UTIL_USER);

		return 0;
	}

	if (ctx->flags & SPU_CREATE_NOSCHED) {
		spu_prio_wait(ctx);
		goto spu_activate_top;
	}

	spu_add_to_rq(ctx);

	return 0;
}

/**
 * grab_runnable_context - try to find a runnable context
 *
 * Remove the highest priority context on the runqueue and return it
 * to the caller.  Returns %NULL if no runnable context was found.
 */
static struct spu_context *grab_runnable_context(int prio, int node)
{
	struct spu_context *ctx;
	int best;

	spin_lock(&spu_prio->runq_lock);
	best = find_first_bit(spu_prio->bitmap, prio);
	while (best < prio) {
		struct list_head *rq = &spu_prio->runq[best];

		list_for_each_entry(ctx, rq, rq) {
			/* XXX(hch): check for affinity here as well */
			if (__node_allowed(ctx, node)) {
				__spu_del_from_rq(ctx);
				goto found;
			}
		}
		best++;
	}
	ctx = NULL;
 found:
	spin_unlock(&spu_prio->runq_lock);
	return ctx;
}

static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio)
{
	struct spu *spu = ctx->spu;
	struct spu_context *new = NULL;

	if (spu) {
		new = grab_runnable_context(max_prio, spu->node);
		if (new || force) {
			spu_unschedule(spu, ctx);
			if (new) {
				if (new->flags & SPU_CREATE_NOSCHED)
					wake_up(&new->stop_wq);
				else {
					spu_release(ctx);
					spu_schedule(spu, new);
					/*
					 * This one can't easily be made
					 * interruptible.
					 */
					mutex_lock(&ctx->state_mutex);
				}
			}
		}
	}

	return new != NULL;
}
/**
 * spu_deactivate - unbind a context from its physical spu
 * @ctx:	spu context to unbind
 *
 * Unbind @ctx from the physical spu it is running on and schedule
 * the highest priority context to run on the freed physical spu.
 */
void spu_deactivate(struct spu_context *ctx)
{
	spu_context_nospu_trace(spu_deactivate__enter, ctx);
	__spu_deactivate(ctx, 1, MAX_PRIO);
}

/**
 * spu_yield - yield a physical spu if others are waiting
 * @ctx:	spu context to yield
 *
 * Check if there is a higher priority context waiting and if so,
 * unbind @ctx from the physical spu and schedule the highest
 * priority context to run on the freed physical spu instead.
 */
void spu_yield(struct spu_context *ctx)
{
	spu_context_nospu_trace(spu_yield__enter, ctx);
	if (!(ctx->flags & SPU_CREATE_NOSCHED)) {
		mutex_lock(&ctx->state_mutex);
		__spu_deactivate(ctx, 0, MAX_PRIO);
		mutex_unlock(&ctx->state_mutex);
	}
}

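/*
 * spusched_tick() runs for every loaded context each SPUSCHED_TICK jiffies
 * (while any context is waiting for an spu).  Passing ctx->prio + 1 to
 * grab_runnable_context() below means a waiting context of *equal*
 * priority may take the spu over once the timeslice is used up, giving
 * round-robin scheduling within a priority level.
 */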
static noinline void spusched_tick(struct spu_context *ctx)
{
	struct spu_context *new = NULL;
	struct spu *spu = NULL;

	if (spu_acquire(ctx))
		BUG();	/* a kernel thread never has signals pending */

	if (ctx->state != SPU_STATE_RUNNABLE)
		goto out;
	if (ctx->flags & SPU_CREATE_NOSCHED)
		goto out;
	if (ctx->policy == SCHED_FIFO)
		goto out;

	if (--ctx->time_slice && test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags))
		goto out;

	spu = ctx->spu;

	spu_context_trace(spusched_tick__preempt, ctx, spu);

	new = grab_runnable_context(ctx->prio + 1, spu->node);
	if (new) {
		spu_unschedule(spu, ctx);
		if (test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags))
			spu_add_to_rq(ctx);
	} else {
		spu_context_nospu_trace(spusched_tick__newslice, ctx);
		if (!ctx->time_slice)
			ctx->time_slice++;
	}
out:
	spu_release(ctx);

	if (new)
		spu_schedule(spu, new);
}

/**
 * count_active_contexts - count nr of active tasks
 *
 * Return the number of tasks currently running or waiting to run.
 *
 * Note that we don't take runq_lock / list_mutex here.  Reading
 * a single 32bit value is atomic on powerpc, and we don't care
 * about memory ordering issues here.
 */
static unsigned long count_active_contexts(void)
{
	int nr_active = 0, node;

	for (node = 0; node < MAX_NUMNODES; node++)
		nr_active += cbe_spu_info[node].nr_active;
	nr_active += spu_prio->nr_waiting;

	return nr_active;
}

/**
 * spu_calc_load - update the avenrun load estimates.
 *
 * No locking against reading these values from userspace, as for
 * the CPU loadavg code.
 */
static void spu_calc_load(void)
{
	unsigned long active_tasks; /* fixed-point */

	active_tasks = count_active_contexts() * FIXED_1;
	CALC_LOAD(spu_avenrun[0], EXP_1, active_tasks);
	CALC_LOAD(spu_avenrun[1], EXP_5, active_tasks);
	CALC_LOAD(spu_avenrun[2], EXP_15, active_tasks);
}
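
/*
 * CALC_LOAD() is the same fixed-point exponential decay the kernel uses
 * for the CPU load average: with FSHIFT = 11 fixed-point bits
 * (FIXED_1 = 2048) and samples every LOAD_FREQ (5 seconds),
 *
 *	load = load * exp / FIXED_1 + active * (FIXED_1 - exp) / FIXED_1
 *
 * where e.g. EXP_1 = 1884 ~= 2048 / e^(5/60), so the one-minute estimate
 * decays towards the sampled context count with a 60 second time constant.
 */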

static void spusched_wake(unsigned long data)
{
	mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
	wake_up_process(spusched_task);
}

static void spuloadavg_wake(unsigned long data)
{
	mod_timer(&spuloadavg_timer, jiffies + LOAD_FREQ);
	spu_calc_load();
}

static int spusched_thread(void *unused)
{
	struct spu *spu;
	int node;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
		for (node = 0; node < MAX_NUMNODES; node++) {
			struct mutex *mtx = &cbe_spu_info[node].list_mutex;

			mutex_lock(mtx);
			list_for_each_entry(spu, &cbe_spu_info[node].spus,
					cbe_list) {
				struct spu_context *ctx = spu->ctx;

				if (ctx) {
					get_spu_context(ctx);
					mutex_unlock(mtx);
					spusched_tick(ctx);
					mutex_lock(mtx);
					put_spu_context(ctx);
				}
			}
			mutex_unlock(mtx);
		}
	}

	return 0;
}

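/*
 * Utilization accounting: a context is charged SPU_UTIL_SYSTEM while it
 * is being bound or unbound, SPU_UTIL_USER while it runs on an spu, and
 * SPU_UTIL_IDLE_LOADED while it sits loaded but stopped (see
 * spu_bind_context() and spu_unbind_context() above).  While a spu is
 * loaded, each transition below adds the nanoseconds spent in the old
 * state to both the context and the physical spu statistics.
 */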
void spuctx_switch_state(struct spu_context *ctx,
		enum spu_utilization_state new_state)
{
	unsigned long long curtime;
	signed long long delta;
	struct timespec ts;
	struct spu *spu;
	enum spu_utilization_state old_state;
	int node;

	ktime_get_ts(&ts);
	curtime = timespec_to_ns(&ts);
	delta = curtime - ctx->stats.tstamp;

	WARN_ON(!mutex_is_locked(&ctx->state_mutex));
	WARN_ON(delta < 0);

	spu = ctx->spu;
	old_state = ctx->stats.util_state;
	ctx->stats.util_state = new_state;
	ctx->stats.tstamp = curtime;

	/*
	 * Update the physical SPU utilization statistics.
	 */
	if (spu) {
		ctx->stats.times[old_state] += delta;
		spu->stats.times[old_state] += delta;
		spu->stats.util_state = new_state;
		spu->stats.tstamp = curtime;
		node = spu->node;
		if (old_state == SPU_UTIL_USER)
			atomic_dec(&cbe_spu_info[node].busy_spus);
		if (new_state == SPU_UTIL_USER)
			atomic_inc(&cbe_spu_info[node].busy_spus);
	}
}

#define LOAD_INT(x) ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
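
/*
 * Example, assuming FSHIFT = 11 (FIXED_1 = 2048): an average of 1.5
 * contexts is stored in spu_avenrun[] as 3072.  Adding FIXED_1/200 (10)
 * rounds to the nearest hundredth, then LOAD_INT(3082) = 1 and
 * LOAD_FRAC(3082) = (1034 * 100) >> 11 = 50, so show_spu_loadavg()
 * prints "1.50".
 */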

static int show_spu_loadavg(struct seq_file *s, void *private)
{
	int a, b, c;

	a = spu_avenrun[0] + (FIXED_1/200);
	b = spu_avenrun[1] + (FIXED_1/200);
	c = spu_avenrun[2] + (FIXED_1/200);

	/*
	 * Note that last_pid doesn't really make much sense for the
	 * SPU loadavg (it even seems very odd on the CPU side...),
	 * but we include it here to have a 100% compatible interface.
	 */
	seq_printf(s, "%d.%02d %d.%02d %d.%02d %ld/%d %d\n",
		LOAD_INT(a), LOAD_FRAC(a),
		LOAD_INT(b), LOAD_FRAC(b),
		LOAD_INT(c), LOAD_FRAC(c),
		count_active_contexts(),
		atomic_read(&nr_spu_contexts),
		current->nsproxy->pid_ns->last_pid);
	return 0;
}

static int spu_loadavg_open(struct inode *inode, struct file *file)
{
	return single_open(file, show_spu_loadavg, NULL);
}

static const struct file_operations spu_loadavg_fops = {
	.open		= spu_loadavg_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

int __init spu_sched_init(void)
{
	struct proc_dir_entry *entry;
	int err = -ENOMEM, i;

	spu_prio = kzalloc(sizeof(struct spu_prio_array), GFP_KERNEL);
	if (!spu_prio)
		goto out;

	for (i = 0; i < MAX_PRIO; i++) {
		INIT_LIST_HEAD(&spu_prio->runq[i]);
		__clear_bit(i, spu_prio->bitmap);
	}
	spin_lock_init(&spu_prio->runq_lock);

	setup_timer(&spusched_timer, spusched_wake, 0);
	setup_timer(&spuloadavg_timer, spuloadavg_wake, 0);

	spusched_task = kthread_run(spusched_thread, NULL, "spusched");
	if (IS_ERR(spusched_task)) {
		err = PTR_ERR(spusched_task);
		goto out_free_spu_prio;
	}

	mod_timer(&spuloadavg_timer, 0);

	entry = proc_create("spu_loadavg", 0, NULL, &spu_loadavg_fops);
	if (!entry)
		goto out_stop_kthread;

	pr_debug("spusched: tick: %d, min ticks: %d, default ticks: %d\n",
			SPUSCHED_TICK, MIN_SPU_TIMESLICE, DEF_SPU_TIMESLICE);
	return 0;

 out_stop_kthread:
	kthread_stop(spusched_task);
 out_free_spu_prio:
	kfree(spu_prio);
 out:
	return err;
}

void spu_sched_exit(void)
{
	struct spu *spu;
	int node;

	remove_proc_entry("spu_loadavg", NULL);

	del_timer_sync(&spusched_timer);
	del_timer_sync(&spuloadavg_timer);
	kthread_stop(spusched_task);

	for (node = 0; node < MAX_NUMNODES; node++) {
		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list)
			if (spu->alloc_state != SPU_FREE)
				spu->alloc_state = SPU_FREE;
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	}
	kfree(spu_prio);
}