/* sched.c - SPU scheduler.
 *
 * Copyright (C) IBM 2005
 * Author: Mark Nutter <mnutter@us.ibm.com>
 *
 * 2006-03-31	NUMA domains added.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/completion.h>
#include <linux/vmalloc.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/numa.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/pid_namespace.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/marker.h>

#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/spu.h>
#include <asm/spu_csa.h>
#include <asm/spu_priv1.h>
#include "spufs.h"

struct spu_prio_array {
	DECLARE_BITMAP(bitmap, MAX_PRIO);
	struct list_head runq[MAX_PRIO];
	spinlock_t runq_lock;
	int nr_waiting;
};

static unsigned long spu_avenrun[3];
static struct spu_prio_array *spu_prio;
static struct task_struct *spusched_task;
static struct timer_list spusched_timer;
static struct timer_list spuloadavg_timer;

/*
 * Priority of a normal, non-rt, non-niced process (aka nice level 0).
 */
#define NORMAL_PRIO		120

/*
 * Frequency of the spu scheduler tick.  By default we do one SPU scheduler
 * tick for every 10 CPU scheduler ticks.
 */
#define SPUSCHED_TICK		(10)

/*
 * These are the 'tuning knobs' of the scheduler:
 *
 * Minimum timeslice is 5 msecs (or 1 spu scheduler tick, whichever is
 * larger), default timeslice is 100 msecs, maximum timeslice is 800 msecs.
 */
#define MIN_SPU_TIMESLICE	max(5 * HZ / (1000 * SPUSCHED_TICK), 1)
#define DEF_SPU_TIMESLICE	(100 * HZ / (1000 * SPUSCHED_TICK))

#define MAX_USER_PRIO		(MAX_PRIO - MAX_RT_PRIO)
#define SCALE_PRIO(x, prio) \
	max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_SPU_TIMESLICE)

/*
 * scale user-nice values [ -20 ... 0 ... 19 ] to time slice values:
 * [800ms ... 100ms ... 5ms]
 *
 * The higher a thread's priority, the bigger timeslices
 * it gets during one round of execution. But even the lowest
 * priority thread gets MIN_TIMESLICE worth of execution time.
 */
void spu_set_timeslice(struct spu_context *ctx)
{
	if (ctx->prio < NORMAL_PRIO)
		ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE * 4, ctx->prio);
	else
		ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE, ctx->prio);
}

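/*
 * Worked example (assuming HZ == 1000, MAX_PRIO == 140 and MAX_RT_PRIO == 100,
 * so that DEF_SPU_TIMESLICE == 10, MIN_SPU_TIMESLICE == 1 and
 * MAX_USER_PRIO / 2 == 20; one spu scheduler tick is then 10ms):
 *
 *   nice   0 (prio 120): SCALE_PRIO(10, 120) = 10 * 20 / 20 = 10 ticks ~ 100ms
 *   nice -20 (prio 100): SCALE_PRIO(40, 100) = 40 * 40 / 20 = 80 ticks ~ 800ms
 *   nice +19 (prio 139): SCALE_PRIO(10, 139) = max(0, 1)    =  1 tick  ~  10ms
 */
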
/*
 * Update scheduling information from the owning thread.
 */
void __spu_update_sched_info(struct spu_context *ctx)
{
	/*
	 * assert that the context is not on the runqueue, so it is safe
	 * to change its scheduling parameters.
	 */
	BUG_ON(!list_empty(&ctx->rq));

	/*
	 * 32-Bit assignments are atomic on powerpc, and we don't care about
	 * memory ordering here because retrieving the controlling thread is
	 * by definition racy.
	 */
	ctx->tid = current->pid;

	/*
	 * We do our own priority calculations, so we normally want
	 * ->static_prio to start with. Unfortunately this field
	 * contains junk for threads with a realtime scheduling
	 * policy so we have to look at ->prio in this case.
	 */
	if (rt_prio(current->prio))
		ctx->prio = current->prio;
	else
		ctx->prio = current->static_prio;
	ctx->policy = current->policy;

	/*
	 * TO DO: the context may be loaded, so we may need to activate
	 * it again on a different node. But it shouldn't hurt anything
	 * to update its parameters, because we know that the scheduler
	 * is not actively looking at this field, since it is not on the
	 * runqueue. The context will be rescheduled on the proper node
	 * if it is timesliced or preempted.
	 */
	ctx->cpus_allowed = current->cpus_allowed;
}

void spu_update_sched_info(struct spu_context *ctx)
{
	int node;

	if (ctx->state == SPU_STATE_RUNNABLE) {
		node = ctx->spu->node;

		/*
		 * Take list_mutex to sync with find_victim().
		 */
		mutex_lock(&cbe_spu_info[node].list_mutex);
		__spu_update_sched_info(ctx);
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	} else {
		__spu_update_sched_info(ctx);
	}
}

static int __node_allowed(struct spu_context *ctx, int node)
{
	if (nr_cpus_node(node)) {
		cpumask_t mask = node_to_cpumask(node);

		if (cpus_intersects(mask, ctx->cpus_allowed))
			return 1;
	}

	return 0;
}

static int node_allowed(struct spu_context *ctx, int node)
{
	int rval;

	spin_lock(&spu_prio->runq_lock);
	rval = __node_allowed(ctx, node);
	spin_unlock(&spu_prio->runq_lock);

	return rval;
}

void do_notify_spus_active(void)
{
	int node;

	/*
	 * Wake up the active spu_contexts.
	 *
	 * When the awakened processes see their "notify_active" flag is set,
	 * they will call spu_switch_notify().
	 */
	for_each_online_node(node) {
		struct spu *spu;

		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			if (spu->alloc_state != SPU_FREE) {
				struct spu_context *ctx = spu->ctx;
				set_bit(SPU_SCHED_NOTIFY_ACTIVE,
					&ctx->sched_flags);
				mb();
				wake_up_all(&ctx->stop_wq);
			}
		}
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	}
}

/**
 * spu_bind_context - bind spu context to physical spu
 * @spu: physical spu to bind to
 * @ctx: context to bind
 */
static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
{
	spu_context_trace(spu_bind_context__enter, ctx, spu);

	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

	if (ctx->flags & SPU_CREATE_NOSCHED)
		atomic_inc(&cbe_spu_info[spu->node].reserved_spus);

	ctx->stats.slb_flt_base = spu->stats.slb_flt;
	ctx->stats.class2_intr_base = spu->stats.class2_intr;

	spu->ctx = ctx;
	spu->flags = 0;
	ctx->spu = spu;
	ctx->ops = &spu_hw_ops;
	spu->pid = current->pid;
	spu->tgid = current->tgid;
	spu_associate_mm(spu, ctx->owner);
	spu->ibox_callback = spufs_ibox_callback;
	spu->wbox_callback = spufs_wbox_callback;
	spu->stop_callback = spufs_stop_callback;
	spu->mfc_callback = spufs_mfc_callback;
	mb();
	spu_unmap_mappings(ctx);
	spu_restore(&ctx->csa, spu);
	spu->timestamp = jiffies;
	spu_cpu_affinity_set(spu, raw_smp_processor_id());
	spu_switch_notify(spu, ctx);
	ctx->state = SPU_STATE_RUNNABLE;

	spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
}

/*
 * Must be used with the list_mutex held.
 */
static inline int sched_spu(struct spu *spu)
{
	BUG_ON(!mutex_is_locked(&cbe_spu_info[spu->node].list_mutex));

	return (!spu->ctx || !(spu->ctx->flags & SPU_CREATE_NOSCHED));
}

static void aff_merge_remaining_ctxs(struct spu_gang *gang)
{
	struct spu_context *ctx;

	list_for_each_entry(ctx, &gang->aff_list_head, aff_list) {
		if (list_empty(&ctx->aff_list))
			list_add(&ctx->aff_list, &gang->aff_list_head);
	}
	gang->aff_flags |= AFF_MERGED;
}

static void aff_set_offsets(struct spu_gang *gang)
{
	struct spu_context *ctx;
	int offset;

	offset = -1;
	list_for_each_entry_reverse(ctx, &gang->aff_ref_ctx->aff_list,
								aff_list) {
		if (&ctx->aff_list == &gang->aff_list_head)
			break;
		ctx->aff_offset = offset--;
	}

	offset = 0;
	list_for_each_entry(ctx, gang->aff_ref_ctx->aff_list.prev, aff_list) {
		if (&ctx->aff_list == &gang->aff_list_head)
			break;
		ctx->aff_offset = offset++;
	}

	gang->aff_flags |= AFF_OFFSETS_SET;
}

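/*
 * For example, for a gang whose affinity list is ordered c0, c1, ref, c2,
 * aff_set_offsets() above assigns aff_offset values of -2, -1, 0 and +1
 * respectively: offsets count outwards from the reference context in both
 * directions.
 */
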
296static struct spu *aff_ref_location(struct spu_context *ctx, int mem_aff,
297 int group_size, int lowest_offset)
298{
299 struct spu *spu;
300 int node, n;
301
302 /*
303 * TODO: A better algorithm could be used to find a good spu to be
304 * used as reference location for the ctxs chain.
305 */
306 node = cpu_to_node(raw_smp_processor_id());
307 for (n = 0; n < MAX_NUMNODES; n++, node++) {
308 node = (node < MAX_NUMNODES) ? node : 0;
309 if (!node_allowed(ctx, node))
310 continue;
Christoph Hellwig486acd42007-07-20 21:39:54 +0200311 mutex_lock(&cbe_spu_info[node].list_mutex);
Arnd Bergmannc5fc8d22007-07-20 21:39:48 +0200312 list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
313 if ((!mem_aff || spu->has_mem_affinity) &&
Christoph Hellwig486acd42007-07-20 21:39:54 +0200314 sched_spu(spu)) {
315 mutex_unlock(&cbe_spu_info[node].list_mutex);
Arnd Bergmannc5fc8d22007-07-20 21:39:48 +0200316 return spu;
Christoph Hellwig486acd42007-07-20 21:39:54 +0200317 }
Arnd Bergmannc5fc8d22007-07-20 21:39:48 +0200318 }
Christoph Hellwig486acd42007-07-20 21:39:54 +0200319 mutex_unlock(&cbe_spu_info[node].list_mutex);
Arnd Bergmannc5fc8d22007-07-20 21:39:48 +0200320 }
321 return NULL;
322}
323
324static void aff_set_ref_point_location(struct spu_gang *gang)
325{
326 int mem_aff, gs, lowest_offset;
327 struct spu_context *ctx;
328 struct spu *tmp;
329
330 mem_aff = gang->aff_ref_ctx->flags & SPU_CREATE_AFFINITY_MEM;
331 lowest_offset = 0;
332 gs = 0;
333
334 list_for_each_entry(tmp, &gang->aff_list_head, aff_list)
335 gs++;
336
337 list_for_each_entry_reverse(ctx, &gang->aff_ref_ctx->aff_list,
338 aff_list) {
339 if (&ctx->aff_list == &gang->aff_list_head)
340 break;
341 lowest_offset = ctx->aff_offset;
342 }
343
Andre Detsch683e3ab2007-07-31 09:48:11 +1000344 gang->aff_ref_spu = aff_ref_location(gang->aff_ref_ctx, mem_aff, gs,
345 lowest_offset);
Arnd Bergmannc5fc8d22007-07-20 21:39:48 +0200346}
347
Christoph Hellwig486acd42007-07-20 21:39:54 +0200348static struct spu *ctx_location(struct spu *ref, int offset, int node)
Arnd Bergmannc5fc8d22007-07-20 21:39:48 +0200349{
350 struct spu *spu;
351
352 spu = NULL;
353 if (offset >= 0) {
354 list_for_each_entry(spu, ref->aff_list.prev, aff_list) {
Christoph Hellwig486acd42007-07-20 21:39:54 +0200355 BUG_ON(spu->node != node);
Arnd Bergmannc5fc8d22007-07-20 21:39:48 +0200356 if (offset == 0)
357 break;
358 if (sched_spu(spu))
359 offset--;
360 }
361 } else {
362 list_for_each_entry_reverse(spu, ref->aff_list.next, aff_list) {
Christoph Hellwig486acd42007-07-20 21:39:54 +0200363 BUG_ON(spu->node != node);
Arnd Bergmannc5fc8d22007-07-20 21:39:48 +0200364 if (offset == 0)
365 break;
366 if (sched_spu(spu))
367 offset++;
368 }
369 }
Christoph Hellwig486acd42007-07-20 21:39:54 +0200370
Arnd Bergmannc5fc8d22007-07-20 21:39:48 +0200371 return spu;
372}
373
/*
 * has_affinity is called each time a context is going to be scheduled.
 * It returns whether the context is part of an affinity gang with a valid
 * reference spu, setting up that reference spu first if necessary.
 */
static int has_affinity(struct spu_context *ctx)
{
	struct spu_gang *gang = ctx->gang;

	if (list_empty(&ctx->aff_list))
		return 0;

	if (!gang->aff_ref_spu) {
		if (!(gang->aff_flags & AFF_MERGED))
			aff_merge_remaining_ctxs(gang);
		if (!(gang->aff_flags & AFF_OFFSETS_SET))
			aff_set_offsets(gang);
		aff_set_ref_point_location(gang);
	}

	return gang->aff_ref_spu != NULL;
}

/**
 * spu_unbind_context - unbind spu context from physical spu
 * @spu: physical spu to unbind from
 * @ctx: context to unbind
 */
static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
{
	spu_context_trace(spu_unbind_context__enter, ctx, spu);

	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

	if (spu->ctx->flags & SPU_CREATE_NOSCHED)
		atomic_dec(&cbe_spu_info[spu->node].reserved_spus);

	if (ctx->gang) {
		mutex_lock(&ctx->gang->aff_mutex);
		if (has_affinity(ctx)) {
			if (atomic_dec_and_test(&ctx->gang->aff_sched_count))
				ctx->gang->aff_ref_spu = NULL;
		}
		mutex_unlock(&ctx->gang->aff_mutex);
	}

	spu_switch_notify(spu, NULL);
	spu_unmap_mappings(ctx);
	spu_save(&ctx->csa, spu);
	spu->timestamp = jiffies;
	ctx->state = SPU_STATE_SAVED;
	spu->ibox_callback = NULL;
	spu->wbox_callback = NULL;
	spu->stop_callback = NULL;
	spu->mfc_callback = NULL;
	spu_associate_mm(spu, NULL);
	spu->pid = 0;
	spu->tgid = 0;
	ctx->ops = &spu_backing_ops;
	spu->flags = 0;
	spu->ctx = NULL;

	ctx->stats.slb_flt +=
		(spu->stats.slb_flt - ctx->stats.slb_flt_base);
	ctx->stats.class2_intr +=
		(spu->stats.class2_intr - ctx->stats.class2_intr_base);

	/* This maps the underlying spu state to idle */
	spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
	ctx->spu = NULL;
}

/**
 * spu_add_to_rq - add a context to the runqueue
 * @ctx: context to add
 */
static void __spu_add_to_rq(struct spu_context *ctx)
{
	/*
	 * Unfortunately this code path can be called from multiple threads
	 * on behalf of a single context due to the way the problem state
	 * mmap support works.
	 *
	 * Fortunately we need to wake up all these threads at the same time
	 * and can simply skip the runqueue addition for all but the first
	 * thread getting into this codepath.
	 *
	 * It's still quite hacky, and long-term we should proxy all other
	 * threads through the owner thread so that spu_run is in control
	 * of all the scheduling activity for a given context.
	 */
	if (list_empty(&ctx->rq)) {
		list_add_tail(&ctx->rq, &spu_prio->runq[ctx->prio]);
		set_bit(ctx->prio, spu_prio->bitmap);
		if (!spu_prio->nr_waiting++)
			__mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
	}
}

static void spu_add_to_rq(struct spu_context *ctx)
{
	spin_lock(&spu_prio->runq_lock);
	__spu_add_to_rq(ctx);
	spin_unlock(&spu_prio->runq_lock);
}

static void __spu_del_from_rq(struct spu_context *ctx)
{
	int prio = ctx->prio;

	if (!list_empty(&ctx->rq)) {
		if (!--spu_prio->nr_waiting)
			del_timer(&spusched_timer);
		list_del_init(&ctx->rq);

		if (list_empty(&spu_prio->runq[prio]))
			clear_bit(prio, spu_prio->bitmap);
	}
}

void spu_del_from_rq(struct spu_context *ctx)
{
	spin_lock(&spu_prio->runq_lock);
	__spu_del_from_rq(ctx);
	spin_unlock(&spu_prio->runq_lock);
}

static void spu_prio_wait(struct spu_context *ctx)
{
	DEFINE_WAIT(wait);

	/*
	 * The caller must explicitly wait for a context to be loaded
	 * if the nosched flag is set.  If NOSCHED is not set, the caller
	 * queues the context and waits for an spu event or error.
	 */
	BUG_ON(!(ctx->flags & SPU_CREATE_NOSCHED));

	spin_lock(&spu_prio->runq_lock);
	prepare_to_wait_exclusive(&ctx->stop_wq, &wait, TASK_INTERRUPTIBLE);
	if (!signal_pending(current)) {
		__spu_add_to_rq(ctx);
		spin_unlock(&spu_prio->runq_lock);
		mutex_unlock(&ctx->state_mutex);
		schedule();
		mutex_lock(&ctx->state_mutex);
		spin_lock(&spu_prio->runq_lock);
		__spu_del_from_rq(ctx);
	}
	spin_unlock(&spu_prio->runq_lock);
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&ctx->stop_wq, &wait);
}

static struct spu *spu_get_idle(struct spu_context *ctx)
{
	struct spu *spu, *aff_ref_spu;
	int node, n;

	spu_context_nospu_trace(spu_get_idle__enter, ctx);

	if (ctx->gang) {
		mutex_lock(&ctx->gang->aff_mutex);
		if (has_affinity(ctx)) {
			aff_ref_spu = ctx->gang->aff_ref_spu;
			atomic_inc(&ctx->gang->aff_sched_count);
			mutex_unlock(&ctx->gang->aff_mutex);
			node = aff_ref_spu->node;

			mutex_lock(&cbe_spu_info[node].list_mutex);
			spu = ctx_location(aff_ref_spu, ctx->aff_offset, node);
			if (spu && spu->alloc_state == SPU_FREE)
				goto found;
			mutex_unlock(&cbe_spu_info[node].list_mutex);

			mutex_lock(&ctx->gang->aff_mutex);
			if (atomic_dec_and_test(&ctx->gang->aff_sched_count))
				ctx->gang->aff_ref_spu = NULL;
			mutex_unlock(&ctx->gang->aff_mutex);
			goto not_found;
		}
		mutex_unlock(&ctx->gang->aff_mutex);
	}
	node = cpu_to_node(raw_smp_processor_id());
	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(ctx, node))
			continue;

		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			if (spu->alloc_state == SPU_FREE)
				goto found;
		}
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	}

 not_found:
	spu_context_nospu_trace(spu_get_idle__not_found, ctx);
	return NULL;

 found:
	spu->alloc_state = SPU_USED;
	mutex_unlock(&cbe_spu_info[node].list_mutex);
	spu_context_trace(spu_get_idle__found, ctx, spu);
	spu_init_channels(spu);
	return spu;
}

/**
 * find_victim - find a lower priority context to preempt
 * @ctx: candidate context for running
 *
 * Returns the freed physical spu to run the new context on.
 */
static struct spu *find_victim(struct spu_context *ctx)
{
	struct spu_context *victim = NULL;
	struct spu *spu;
	int node, n;

	spu_context_nospu_trace(spu_find_vitim__enter, ctx);

	/*
	 * Look for a possible preemption candidate on the local node first.
	 * If there is no candidate look at the other nodes.  This isn't
	 * exactly fair, but so far the whole spu scheduler tries to keep
	 * a strong node affinity.  We might want to fine-tune this in
	 * the future.
	 */
 restart:
	node = cpu_to_node(raw_smp_processor_id());
	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(ctx, node))
			continue;

		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			struct spu_context *tmp = spu->ctx;

			if (tmp && tmp->prio > ctx->prio &&
			    !(tmp->flags & SPU_CREATE_NOSCHED) &&
			    (!victim || tmp->prio > victim->prio))
				victim = spu->ctx;
		}
		mutex_unlock(&cbe_spu_info[node].list_mutex);

		if (victim) {
			/*
			 * This nests ctx->state_mutex, but we always lock
			 * higher priority contexts before lower priority
			 * ones, so this is safe until we introduce
			 * priority inheritance schemes.
			 *
			 * XXX if the highest priority context is locked,
			 * this can loop a long time.  Might be better to
			 * look at another context or give up after X retries.
			 */
			if (!mutex_trylock(&victim->state_mutex)) {
				victim = NULL;
				goto restart;
			}

			spu = victim->spu;
			if (!spu || victim->prio <= ctx->prio) {
				/*
				 * This race can happen because we've dropped
				 * the active list mutex.  Not a problem, just
				 * restart the search.
				 */
				mutex_unlock(&victim->state_mutex);
				victim = NULL;
				goto restart;
			}

			spu_context_trace(__spu_deactivate__unload, ctx, spu);

			mutex_lock(&cbe_spu_info[node].list_mutex);
			cbe_spu_info[node].nr_active--;
			spu_unbind_context(spu, victim);
			mutex_unlock(&cbe_spu_info[node].list_mutex);

			victim->stats.invol_ctx_switch++;
			spu->stats.invol_ctx_switch++;
			spu_add_to_rq(victim);

			mutex_unlock(&victim->state_mutex);

			return spu;
		}
	}

	return NULL;
}

static void __spu_schedule(struct spu *spu, struct spu_context *ctx)
{
	int node = spu->node;
	int success = 0;

	spu_set_timeslice(ctx);

	mutex_lock(&cbe_spu_info[node].list_mutex);
	if (spu->ctx == NULL) {
		spu_bind_context(spu, ctx);
		cbe_spu_info[node].nr_active++;
		spu->alloc_state = SPU_USED;
		success = 1;
	}
	mutex_unlock(&cbe_spu_info[node].list_mutex);

	if (success)
		wake_up_all(&ctx->run_wq);
	else
		spu_add_to_rq(ctx);
}

static void spu_schedule(struct spu *spu, struct spu_context *ctx)
{
	/* not a candidate for interruptible because it's called either
	   from the scheduler thread or from spu_deactivate */
	mutex_lock(&ctx->state_mutex);
	__spu_schedule(spu, ctx);
	spu_release(ctx);
}

static void spu_unschedule(struct spu *spu, struct spu_context *ctx)
{
	int node = spu->node;

	mutex_lock(&cbe_spu_info[node].list_mutex);
	cbe_spu_info[node].nr_active--;
	spu->alloc_state = SPU_FREE;
	spu_unbind_context(spu, ctx);
	ctx->stats.invol_ctx_switch++;
	spu->stats.invol_ctx_switch++;
	mutex_unlock(&cbe_spu_info[node].list_mutex);
}

/**
 * spu_activate - find a free spu for a context and execute it
 * @ctx: spu context to schedule
 * @flags: flags (currently ignored)
 *
 * Tries to find a free spu to run @ctx.  If no free spu is available
 * add the context to the runqueue so it gets woken up once an spu
 * is available.
 */
int spu_activate(struct spu_context *ctx, unsigned long flags)
{
	struct spu *spu;

	/*
	 * If there are multiple threads waiting for a single context
	 * only one actually binds the context while the others will
	 * only be able to acquire the state_mutex once the context
	 * already is in runnable state.
	 */
	if (ctx->spu)
		return 0;

spu_activate_top:
	if (signal_pending(current))
		return -ERESTARTSYS;

	spu = spu_get_idle(ctx);
	/*
	 * If this is a realtime thread we try to get it running by
	 * preempting a lower priority thread.
	 */
	if (!spu && rt_prio(ctx->prio))
		spu = find_victim(ctx);
	if (spu) {
		unsigned long runcntl;

		runcntl = ctx->ops->runcntl_read(ctx);
		__spu_schedule(spu, ctx);
		if (runcntl & SPU_RUNCNTL_RUNNABLE)
			spuctx_switch_state(ctx, SPU_UTIL_USER);

		return 0;
	}

	if (ctx->flags & SPU_CREATE_NOSCHED) {
		spu_prio_wait(ctx);
		goto spu_activate_top;
	}

	spu_add_to_rq(ctx);

	return 0;
}

/**
 * grab_runnable_context - try to find a runnable context
 *
 * Remove the highest priority context on the runqueue and return it
 * to the caller. Returns %NULL if no runnable context was found.
 */
static struct spu_context *grab_runnable_context(int prio, int node)
{
	struct spu_context *ctx;
	int best;

	spin_lock(&spu_prio->runq_lock);
	best = find_first_bit(spu_prio->bitmap, prio);
	while (best < prio) {
		struct list_head *rq = &spu_prio->runq[best];

		list_for_each_entry(ctx, rq, rq) {
			/* XXX(hch): check for affinity here as well */
			if (__node_allowed(ctx, node)) {
				__spu_del_from_rq(ctx);
				goto found;
			}
		}
		best++;
	}
	ctx = NULL;
 found:
	spin_unlock(&spu_prio->runq_lock);
	return ctx;
}

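/*
 * The prio argument is an exclusive upper bound on the bitmap scan, so only
 * contexts with a numerically lower (i.e. better) priority than prio are
 * considered: spusched_tick() passes ctx->prio + 1 so that an equal-priority
 * context may take over at the end of a timeslice, while spu_deactivate() and
 * spu_yield() pass MAX_PRIO to accept any waiting context.
 */
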
static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio)
{
	struct spu *spu = ctx->spu;
	struct spu_context *new = NULL;

	if (spu) {
		new = grab_runnable_context(max_prio, spu->node);
		if (new || force) {
			spu_unschedule(spu, ctx);
			if (new) {
				if (new->flags & SPU_CREATE_NOSCHED)
					wake_up(&new->stop_wq);
				else {
					spu_release(ctx);
					spu_schedule(spu, new);
					/* this one can't easily be made
					   interruptible */
					mutex_lock(&ctx->state_mutex);
				}
			}
		}
	}

	return new != NULL;
}

/**
 * spu_deactivate - unbind a context from its physical spu
 * @ctx: spu context to unbind
 *
 * Unbind @ctx from the physical spu it is running on and schedule
 * the highest priority context to run on the freed physical spu.
 */
void spu_deactivate(struct spu_context *ctx)
{
	spu_context_nospu_trace(spu_deactivate__enter, ctx);
	__spu_deactivate(ctx, 1, MAX_PRIO);
}

/**
 * spu_yield - yield a physical spu if others are waiting
 * @ctx: spu context to yield
 *
 * Check if there is a higher priority context waiting and if yes
 * unbind @ctx from the physical spu and schedule the highest
 * priority context to run on the freed physical spu instead.
 */
void spu_yield(struct spu_context *ctx)
{
	spu_context_nospu_trace(spu_yield__enter, ctx);
	if (!(ctx->flags & SPU_CREATE_NOSCHED)) {
		mutex_lock(&ctx->state_mutex);
		__spu_deactivate(ctx, 0, MAX_PRIO);
		mutex_unlock(&ctx->state_mutex);
	}
}

static noinline void spusched_tick(struct spu_context *ctx)
{
	struct spu_context *new = NULL;
	struct spu *spu = NULL;

	if (spu_acquire(ctx))
		BUG();	/* a kernel thread never has signals pending */

	if (ctx->state != SPU_STATE_RUNNABLE)
		goto out;
	if (ctx->flags & SPU_CREATE_NOSCHED)
		goto out;
	if (ctx->policy == SCHED_FIFO)
		goto out;

	if (--ctx->time_slice && ctx->policy != SCHED_IDLE)
		goto out;

	spu = ctx->spu;

	spu_context_trace(spusched_tick__preempt, ctx, spu);

	new = grab_runnable_context(ctx->prio + 1, spu->node);
	if (new) {
		spu_unschedule(spu, ctx);
		if (ctx->policy != SCHED_IDLE)
			spu_add_to_rq(ctx);
	} else {
		spu_context_nospu_trace(spusched_tick__newslice, ctx);
		ctx->time_slice++;
	}
out:
	spu_release(ctx);

	if (new)
		spu_schedule(spu, new);
}

/**
 * count_active_contexts - count nr of active tasks
 *
 * Return the number of tasks currently running or waiting to run.
 *
 * Note that we don't take runq_lock / list_mutex here.  Reading
 * a single 32bit value is atomic on powerpc, and we don't care
 * about memory ordering issues here.
 */
static unsigned long count_active_contexts(void)
{
	int nr_active = 0, node;

	for (node = 0; node < MAX_NUMNODES; node++)
		nr_active += cbe_spu_info[node].nr_active;
	nr_active += spu_prio->nr_waiting;

	return nr_active;
}

/**
 * spu_calc_load - update the avenrun load estimates.
 *
 * No locking against reading these values from userspace, as for
 * the CPU loadavg code.
 */
static void spu_calc_load(void)
{
	unsigned long active_tasks; /* fixed-point */

	active_tasks = count_active_contexts() * FIXED_1;
	CALC_LOAD(spu_avenrun[0], EXP_1, active_tasks);
	CALC_LOAD(spu_avenrun[1], EXP_5, active_tasks);
	CALC_LOAD(spu_avenrun[2], EXP_15, active_tasks);
}

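/*
 * Worked example, assuming the usual CALC_LOAD()/FIXED_1 definitions from
 * <linux/sched.h> (FSHIFT == 11, FIXED_1 == 2048, EXP_1 == 1884): with one
 * context active and spu_avenrun[0] previously zero, a single update gives
 *
 *	load = (0 * 1884 + 1 * 2048 * (2048 - 1884)) >> 11 = 164
 *
 * which show_spu_loadavg() below prints as roughly "0.08"; repeated updates
 * every LOAD_FREQ converge towards 1 * FIXED_1 == 2048, i.e. "1.00".
 */
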
static void spusched_wake(unsigned long data)
{
	mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
	wake_up_process(spusched_task);
}

static void spuloadavg_wake(unsigned long data)
{
	mod_timer(&spuloadavg_timer, jiffies + LOAD_FREQ);
	spu_calc_load();
}

static int spusched_thread(void *unused)
{
	struct spu *spu;
	int node;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
		for (node = 0; node < MAX_NUMNODES; node++) {
			struct mutex *mtx = &cbe_spu_info[node].list_mutex;

			mutex_lock(mtx);
			list_for_each_entry(spu, &cbe_spu_info[node].spus,
					cbe_list) {
				struct spu_context *ctx = spu->ctx;

				if (ctx) {
					mutex_unlock(mtx);
					spusched_tick(ctx);
					mutex_lock(mtx);
				}
			}
			mutex_unlock(mtx);
		}
	}

	return 0;
}

void spuctx_switch_state(struct spu_context *ctx,
		enum spu_utilization_state new_state)
{
	unsigned long long curtime;
	signed long long delta;
	struct timespec ts;
	struct spu *spu;
	enum spu_utilization_state old_state;

	ktime_get_ts(&ts);
	curtime = timespec_to_ns(&ts);
	delta = curtime - ctx->stats.tstamp;

	WARN_ON(!mutex_is_locked(&ctx->state_mutex));
	WARN_ON(delta < 0);

	spu = ctx->spu;
	old_state = ctx->stats.util_state;
	ctx->stats.util_state = new_state;
	ctx->stats.tstamp = curtime;

	/*
	 * Update the physical SPU utilization statistics.
	 */
	if (spu) {
		ctx->stats.times[old_state] += delta;
		spu->stats.times[old_state] += delta;
		spu->stats.util_state = new_state;
		spu->stats.tstamp = curtime;
	}
}

#define LOAD_INT(x) ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
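
/*
 * These split a fixed-point load value into integer and fractional decimal
 * parts: assuming FSHIFT == 11, a value of 1034 (about 0.505 * FIXED_1)
 * gives LOAD_INT(1034) == 0 and LOAD_FRAC(1034) == 50, printed as "0.50".
 */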

static int show_spu_loadavg(struct seq_file *s, void *private)
{
	int a, b, c;

	a = spu_avenrun[0] + (FIXED_1/200);
	b = spu_avenrun[1] + (FIXED_1/200);
	c = spu_avenrun[2] + (FIXED_1/200);

	/*
	 * Note that last_pid doesn't really make much sense for the
	 * SPU loadavg (it even seems very odd on the CPU side...),
	 * but we include it here to have a 100% compatible interface.
	 */
	seq_printf(s, "%d.%02d %d.%02d %d.%02d %ld/%d %d\n",
		LOAD_INT(a), LOAD_FRAC(a),
		LOAD_INT(b), LOAD_FRAC(b),
		LOAD_INT(c), LOAD_FRAC(c),
		count_active_contexts(),
		atomic_read(&nr_spu_contexts),
		current->nsproxy->pid_ns->last_pid);
	return 0;
}

static int spu_loadavg_open(struct inode *inode, struct file *file)
{
	return single_open(file, show_spu_loadavg, NULL);
}

static const struct file_operations spu_loadavg_fops = {
	.open		= spu_loadavg_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

int __init spu_sched_init(void)
{
	struct proc_dir_entry *entry;
	int err = -ENOMEM, i;

	spu_prio = kzalloc(sizeof(struct spu_prio_array), GFP_KERNEL);
	if (!spu_prio)
		goto out;

	for (i = 0; i < MAX_PRIO; i++) {
		INIT_LIST_HEAD(&spu_prio->runq[i]);
		__clear_bit(i, spu_prio->bitmap);
	}
	spin_lock_init(&spu_prio->runq_lock);

	setup_timer(&spusched_timer, spusched_wake, 0);
	setup_timer(&spuloadavg_timer, spuloadavg_wake, 0);

	spusched_task = kthread_run(spusched_thread, NULL, "spusched");
	if (IS_ERR(spusched_task)) {
		err = PTR_ERR(spusched_task);
		goto out_free_spu_prio;
	}

	mod_timer(&spuloadavg_timer, 0);

	entry = create_proc_entry("spu_loadavg", 0, NULL);
	if (!entry)
		goto out_stop_kthread;
	entry->proc_fops = &spu_loadavg_fops;

	pr_debug("spusched: tick: %d, min ticks: %d, default ticks: %d\n",
			SPUSCHED_TICK, MIN_SPU_TIMESLICE, DEF_SPU_TIMESLICE);
	return 0;

 out_stop_kthread:
	kthread_stop(spusched_task);
 out_free_spu_prio:
	kfree(spu_prio);
 out:
	return err;
}

void spu_sched_exit(void)
{
	struct spu *spu;
	int node;

	remove_proc_entry("spu_loadavg", NULL);

	del_timer_sync(&spusched_timer);
	del_timer_sync(&spuloadavg_timer);
	kthread_stop(spusched_task);

	for (node = 0; node < MAX_NUMNODES; node++) {
		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list)
			if (spu->alloc_state != SPU_FREE)
				spu->alloc_state = SPU_FREE;
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	}
	kfree(spu_prio);
}