/* sched.c - SPU scheduler.
 *
 * Copyright (C) IBM 2005
 * Author: Mark Nutter <mnutter@us.ibm.com>
 *
 * 2006-03-31	NUMA domains added.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/completion.h>
#include <linux/vmalloc.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/numa.h>
#include <linux/mutex.h>
#include <linux/notifier.h>

#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/spu.h>
#include <asm/spu_csa.h>
#include <asm/spu_priv1.h>
#include "spufs.h"

#define SPU_MIN_TIMESLICE	(100 * HZ / 1000)

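/*
 * All scheduler state lives in a single spu_prio_array: one exclusive
 * wait queue per priority level, plus a bitmap with a bit set for every
 * level that has at least one waiter, so sched_find_first_bit() locates
 * the highest-priority waiter in constant time.  SPUs that currently
 * run a context sit on a per-NUMA-node active list under active_mutex.
 */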
#define SPU_BITMAP_SIZE (((MAX_PRIO+BITS_PER_LONG)/BITS_PER_LONG)+1)
struct spu_prio_array {
	unsigned long bitmap[SPU_BITMAP_SIZE];
	wait_queue_head_t waitq[MAX_PRIO];
	struct list_head active_list[MAX_NUMNODES];
	struct mutex active_mutex[MAX_NUMNODES];
};

static struct spu_prio_array *spu_prio;

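/*
 * node_allowed - can the calling task use SPUs on this NUMA node?
 * Returns 0 when the node has no CPUs or none of them intersect
 * current->cpus_allowed, so SPU placement follows CPU affinity.
 */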
static inline int node_allowed(int node)
{
	cpumask_t mask;

	if (!nr_cpus_node(node))
		return 0;
	mask = node_to_cpumask(node);
	if (!cpus_intersects(mask, current->cpus_allowed))
		return 0;
	return 1;
}

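/*
 * Filling mm->cpu_vm_mask forces TLB invalidations for this mm to be
 * broadcast (global tlbie) instead of staying CPU-local, which is
 * needed because the SPU translates through the mm's page tables but
 * is not itself represented in the mask.
 */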
static inline void mm_needs_global_tlbie(struct mm_struct *mm)
{
	int nr = (NR_CPUS > 1) ? NR_CPUS : NR_CPUS + 1;

	/* Global TLBIE broadcast required with SPEs. */
	__cpus_setall(&mm->cpu_vm_mask, nr);
}

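/*
 * Notifier chain run on every switch of a context onto or off an SPU;
 * ctx == NULL reports the unbind.  Consumers such as SPU profiling
 * register here to track which context occupies which SPU.
 */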
static BLOCKING_NOTIFIER_HEAD(spu_switch_notifier);

static void spu_switch_notify(struct spu *spu, struct spu_context *ctx)
{
	blocking_notifier_call_chain(&spu_switch_notifier,
			ctx ? ctx->object_id : 0, spu);
}

int spu_switch_event_register(struct notifier_block *n)
{
	return blocking_notifier_chain_register(&spu_switch_notifier, n);
}

int spu_switch_event_unregister(struct notifier_block *n)
{
	return blocking_notifier_chain_unregister(&spu_switch_notifier, n);
}

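/*
 * bind_context - load a context onto an idle SPU: wire up the owning
 * mm and the spufs interrupt callbacks, invalidate user mappings of
 * the context so they fault in again against the hardware, restore the
 * saved state (csa), and switch ctx->ops to the hardware variants.
 */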
static inline void bind_context(struct spu *spu, struct spu_context *ctx)
{
	pr_debug("%s: pid=%d SPU=%d NODE=%d\n", __FUNCTION__, current->pid,
		 spu->number, spu->node);
	spu->ctx = ctx;
	spu->flags = 0;
	ctx->spu = spu;
	ctx->ops = &spu_hw_ops;
	spu->pid = current->pid;
	spu->prio = current->prio;
	spu->mm = ctx->owner;
	mm_needs_global_tlbie(spu->mm);
	spu->ibox_callback = spufs_ibox_callback;
	spu->wbox_callback = spufs_wbox_callback;
	spu->stop_callback = spufs_stop_callback;
	spu->mfc_callback = spufs_mfc_callback;
	spu->dma_callback = spufs_dma_callback;
	mb();
	spu_unmap_mappings(ctx);
	spu_restore(&ctx->csa, spu);
	spu->timestamp = jiffies;
	spu_cpu_affinity_set(spu, raw_smp_processor_id());
	spu_switch_notify(spu, ctx);

	ctx->state = SPU_STATE_RUNNABLE;
}

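/*
 * unbind_context - the reverse of bind_context: notify listeners,
 * invalidate user mappings, save the hardware state into the csa,
 * clear the callbacks, and point ctx->ops back at the backing-store
 * implementation so the context stays usable while scheduled out.
 */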
static inline void unbind_context(struct spu *spu, struct spu_context *ctx)
{
	pr_debug("%s: unbind pid=%d SPU=%d NODE=%d\n", __FUNCTION__,
		 spu->pid, spu->number, spu->node);
	spu_switch_notify(spu, NULL);
	spu_unmap_mappings(ctx);
	spu_save(&ctx->csa, spu);
	spu->timestamp = jiffies;
	ctx->state = SPU_STATE_SAVED;
	spu->ibox_callback = NULL;
	spu->wbox_callback = NULL;
	spu->stop_callback = NULL;
	spu->mfc_callback = NULL;
	spu->dma_callback = NULL;
	spu->mm = NULL;
	spu->pid = 0;
	spu->prio = MAX_PRIO;
	ctx->ops = &spu_backing_ops;
	ctx->spu = NULL;
	spu->flags = 0;
	spu->ctx = NULL;
}

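/*
 * spu_add_wq/spu_del_wq keep the priority bitmap consistent with the
 * wait queues: the bit for a priority level is set whenever a waiter
 * enqueues and cleared only when the last waiter at that level leaves.
 */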
static inline void spu_add_wq(wait_queue_head_t *wq, wait_queue_t *wait,
			      int prio)
{
	prepare_to_wait_exclusive(wq, wait, TASK_INTERRUPTIBLE);
	set_bit(prio, spu_prio->bitmap);
}

static inline void spu_del_wq(wait_queue_head_t *wq, wait_queue_t *wait,
			      int prio)
{
	unsigned long flags;	/* spin_lock_irqsave() expects unsigned long */

	__set_current_state(TASK_RUNNING);

	spin_lock_irqsave(&wq->lock, flags);

	remove_wait_queue_locked(wq, wait);
	if (list_empty(&wq->task_list))
		clear_bit(prio, spu_prio->bitmap);

	spin_unlock_irqrestore(&wq->lock, flags);
}

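/*
 * spu_prio_wait - sleep until an SPU may have become available.
 * Drops ctx->state_sema around schedule() so other threads can
 * deactivate or yield SPUs in the meantime; returns with the
 * semaphore re-taken.  Does not sleep if a signal is pending, and
 * the caller must recheck both ctx->spu and signal_pending().
 */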
static void spu_prio_wait(struct spu_context *ctx, u64 flags)
{
	int prio = current->prio;
	wait_queue_head_t *wq = &spu_prio->waitq[prio];
	DEFINE_WAIT(wait);

	if (ctx->spu)
		return;

	spu_add_wq(wq, &wait, prio);

	if (!signal_pending(current)) {
		up_write(&ctx->state_sema);
		pr_debug("%s: pid=%d prio=%d\n", __FUNCTION__,
			 current->pid, current->prio);
		schedule();
		down_write(&ctx->state_sema);
	}

	spu_del_wq(wq, &wait, prio);
}

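/*
 * spu_prio_wakeup - wake exactly one waiter from the highest-priority
 * non-empty wait queue; called whenever an SPU is handed back.
 */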
static void spu_prio_wakeup(void)
{
	int best = sched_find_first_bit(spu_prio->bitmap);
	if (best < MAX_PRIO) {
		wait_queue_head_t *wq = &spu_prio->waitq[best];
		wake_up_interruptible_nr(wq, 1);
	}
}

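/*
 * get_active_spu - take @spu off its node's active list; returns 1 if
 * it was found there, meaning the caller now owns it and may free it.
 * put_active_spu - add @spu to the tail of its node's active list.
 */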
static int get_active_spu(struct spu *spu)
{
	int node = spu->node;
	struct spu *tmp;
	int rc = 0;

	mutex_lock(&spu_prio->active_mutex[node]);
	list_for_each_entry(tmp, &spu_prio->active_list[node], list) {
		if (tmp == spu) {
			list_del_init(&spu->list);
			rc = 1;
			break;
		}
	}
	mutex_unlock(&spu_prio->active_mutex[node]);
	return rc;
}

static void put_active_spu(struct spu *spu)
{
	int node = spu->node;

	mutex_lock(&spu_prio->active_mutex[node]);
	list_add_tail(&spu->list, &spu_prio->active_list[node]);
	mutex_unlock(&spu_prio->active_mutex[node]);
}

static struct spu *spu_get_idle(struct spu_context *ctx, u64 flags)
{
	struct spu *spu = NULL;
	int node = cpu_to_node(raw_smp_processor_id());
	int n;

	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(node))
			continue;
		spu = spu_alloc_node(node);
		if (spu)
			break;
	}
	return spu;
}

static inline struct spu *spu_get(struct spu_context *ctx, u64 flags)
{
	/* Future: spu_get_idle() if possible,
	 * otherwise try to preempt an active
	 * context.
	 */
	return spu_get_idle(ctx, flags);
}

/* The three externally callable interfaces
 * for the scheduler begin here.
 *
 * spu_activate - bind a context to SPU, waiting as needed.
 * spu_deactivate - unbind a context from its SPU.
 * spu_yield - yield an SPU if others are waiting.
 */

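/*
 * spu_activate - find an SPU for @ctx, waiting on the priority queue
 * as long as none is available.  Returns 0 once the context is bound,
 * or -ERESTARTSYS when interrupted by a signal.  @flags is currently
 * unused.  A rough sketch of the caller's side (not verbatim from the
 * spufs run path):
 *
 *	down_write(&ctx->state_sema);
 *	ret = spu_activate(ctx, 0);	// may drop and retake state_sema
 *	up_write(&ctx->state_sema);
 *	...run on the SPU, then later...
 *	spu_yield(ctx);			// hand the SPU over if others wait
 */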
int spu_activate(struct spu_context *ctx, u64 flags)
{
	struct spu *spu;
	int ret = 0;

	for (;;) {
		if (ctx->spu)
			return 0;
		spu = spu_get(ctx, flags);
		if (spu != NULL) {
			if (ctx->spu != NULL) {
				spu_free(spu);
				spu_prio_wakeup();
				break;
			}
			bind_context(spu, ctx);
			put_active_spu(spu);
			break;
		}
		spu_prio_wait(ctx, flags);
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			spu_prio_wakeup();
			break;
		}
	}
	return ret;
}

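/*
 * spu_deactivate - unbind @ctx from its SPU, if any.  The SPU is only
 * freed and waiters woken when get_active_spu() confirmed it was still
 * on the active list, i.e. nobody else is tearing it down concurrently.
 */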
void spu_deactivate(struct spu_context *ctx)
{
	struct spu *spu;
	int needs_idle;

	spu = ctx->spu;
	if (!spu)
		return;
	needs_idle = get_active_spu(spu);
	unbind_context(spu, ctx);
	if (needs_idle) {
		spu_free(spu);
		spu_prio_wakeup();
	}
}

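/*
 * spu_yield - voluntarily give up the SPU when somebody is waiting at
 * any priority; otherwise just drop spu->prio to MAX_PRIO to mark the
 * SPU as preemptible.  Uses a trylock on state_sema so it never blocks,
 * and calls yield() outside the lock when a handoff actually happened.
 */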
void spu_yield(struct spu_context *ctx)
{
	struct spu *spu;
	int need_yield = 0;

	if (down_write_trylock(&ctx->state_sema)) {
		if ((spu = ctx->spu) != NULL) {
			int best = sched_find_first_bit(spu_prio->bitmap);
			if (best < MAX_PRIO) {
				pr_debug("%s: yielding SPU %d NODE %d\n",
					 __FUNCTION__, spu->number, spu->node);
				spu_deactivate(ctx);
				need_yield = 1;
			} else {
				spu->prio = MAX_PRIO;
			}
		}
		up_write(&ctx->state_sema);
	}
	if (unlikely(need_yield))
		yield();
}

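/*
 * spu_sched_init - allocate the priority array and prime the bitmap.
 * The bit at MAX_PRIO acts as a sentinel so sched_find_first_bit()
 * returns MAX_PRIO ("no waiters") while all real queues are empty.
 */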
int __init spu_sched_init(void)
{
	int i;

	spu_prio = kzalloc(sizeof(struct spu_prio_array), GFP_KERNEL);
	if (!spu_prio) {
		printk(KERN_WARNING "%s: Unable to allocate priority queue.\n",
		       __FUNCTION__);
		return 1;
	}
	for (i = 0; i < MAX_PRIO; i++) {
		init_waitqueue_head(&spu_prio->waitq[i]);
		__clear_bit(i, spu_prio->bitmap);
	}
	__set_bit(MAX_PRIO, spu_prio->bitmap);
	for (i = 0; i < MAX_NUMNODES; i++) {
		mutex_init(&spu_prio->active_mutex[i]);
		INIT_LIST_HEAD(&spu_prio->active_list[i]);
	}
	return 0;
}

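/*
 * spu_sched_exit - return any still-active SPUs to the allocator and
 * free the priority array on module unload.
 */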
void __exit spu_sched_exit(void)
{
	struct spu *spu, *tmp;
	int node;

	for (node = 0; node < MAX_NUMNODES; node++) {
		mutex_lock(&spu_prio->active_mutex[node]);
		list_for_each_entry_safe(spu, tmp, &spu_prio->active_list[node],
					 list) {
			list_del_init(&spu->list);
			spu_free(spu);
		}
		mutex_unlock(&spu_prio->active_mutex[node]);
	}
	kfree(spu_prio);
}