/* sched.c - SPU scheduler.
 *
 * Copyright (C) IBM 2005
 * Author: Mark Nutter <mnutter@us.ibm.com>
 *
 * 2006-03-31	NUMA domains added.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/completion.h>
#include <linux/vmalloc.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/numa.h>
#include <linux/mutex.h>
#include <linux/notifier.h>

#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/spu.h>
#include <asm/spu_csa.h>
#include <asm/spu_priv1.h>
#include "spufs.h"

#define SPU_MIN_TIMESLICE	(100 * HZ / 1000)

#define SPU_BITMAP_SIZE (((MAX_PRIO+BITS_PER_LONG)/BITS_PER_LONG)+1)
struct spu_prio_array {
	unsigned long bitmap[SPU_BITMAP_SIZE];
	wait_queue_head_t waitq[MAX_PRIO];
	struct list_head active_list[MAX_NUMNODES];
	struct mutex active_mutex[MAX_NUMNODES];
};

static struct spu_prio_array *spu_prio;

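/*
 * node_allowed - may the current task use SPUs on this NUMA node?
 * Returns nonzero only if @node has CPUs and those CPUs intersect
 * the current task's cpus_allowed mask.
 */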
static inline int node_allowed(int node)
{
	cpumask_t mask;

	if (!nr_cpus_node(node))
		return 0;
	mask = node_to_cpumask(node);
	if (!cpus_intersects(mask, current->cpus_allowed))
		return 0;
	return 1;
}

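/*
 * Setting every bit in mm->cpu_vm_mask makes the mm look active on
 * all CPUs, forcing broadcast tlbie instead of CPU-local
 * invalidations; the SPE's translations depend on seeing those
 * broadcasts.
 */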
static inline void mm_needs_global_tlbie(struct mm_struct *mm)
{
	int nr = (NR_CPUS > 1) ? NR_CPUS : NR_CPUS + 1;

	/* Global TLBIE broadcast required with SPEs. */
	__cpus_setall(&mm->cpu_vm_mask, nr);
}

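/*
 * Notifier chain used to report SPU context switches to interested
 * subsystems (profilers, for example).  The chain is called with the
 * incoming context's object_id, or 0 when the SPU goes idle.
 */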
static BLOCKING_NOTIFIER_HEAD(spu_switch_notifier);

static void spu_switch_notify(struct spu *spu, struct spu_context *ctx)
{
	blocking_notifier_call_chain(&spu_switch_notifier,
			    ctx ? ctx->object_id : 0, spu);
}

int spu_switch_event_register(struct notifier_block * n)
{
	return blocking_notifier_chain_register(&spu_switch_notifier, n);
}

int spu_switch_event_unregister(struct notifier_block * n)
{
	return blocking_notifier_chain_unregister(&spu_switch_notifier, n);
}


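/*
 * bind_context - run a context on the given SPU.
 * Sets up the MM and callback state, restores the saved context
 * image (csa) onto the hardware, and fires the switch notifier.
 */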
static inline void bind_context(struct spu *spu, struct spu_context *ctx)
{
	pr_debug("%s: pid=%d SPU=%d NODE=%d\n", __FUNCTION__, current->pid,
		 spu->number, spu->node);
	spu->ctx = ctx;
	spu->flags = 0;
	ctx->spu = spu;
	ctx->ops = &spu_hw_ops;
	spu->pid = current->pid;
	spu->prio = current->prio;
	spu->mm = ctx->owner;
	mm_needs_global_tlbie(spu->mm);
	spu->ibox_callback = spufs_ibox_callback;
	spu->wbox_callback = spufs_wbox_callback;
	spu->stop_callback = spufs_stop_callback;
	spu->mfc_callback = spufs_mfc_callback;
	spu->dma_callback = spufs_dma_callback;
	mb();
	spu_unmap_mappings(ctx);
	spu_restore(&ctx->csa, spu);
	spu->timestamp = jiffies;
	spu_cpu_affinity_set(spu, raw_smp_processor_id());
	spu_switch_notify(spu, ctx);
}

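/*
 * unbind_context - remove a context from its SPU.
 * Reverses bind_context: saves the context image, clears the
 * callbacks, and returns the context to backing (saved) state.
 */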
static inline void unbind_context(struct spu *spu, struct spu_context *ctx)
{
	pr_debug("%s: unbind pid=%d SPU=%d NODE=%d\n", __FUNCTION__,
		 spu->pid, spu->number, spu->node);
	spu_switch_notify(spu, NULL);
	spu_unmap_mappings(ctx);
	spu_save(&ctx->csa, spu);
	spu->timestamp = jiffies;
	ctx->state = SPU_STATE_SAVED;
	spu->ibox_callback = NULL;
	spu->wbox_callback = NULL;
	spu->stop_callback = NULL;
	spu->mfc_callback = NULL;
	spu->dma_callback = NULL;
	spu->mm = NULL;
	spu->pid = 0;
	spu->prio = MAX_PRIO;
	ctx->ops = &spu_backing_ops;
	ctx->spu = NULL;
	spu->flags = 0;
	spu->ctx = NULL;
}

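/*
 * Waiters are queued per priority; spu_prio->bitmap tracks which
 * priority levels have sleepers so wakeups can be directed to the
 * highest-priority waiter first.
 */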
static inline void spu_add_wq(wait_queue_head_t * wq, wait_queue_t * wait,
			      int prio)
{
	prepare_to_wait_exclusive(wq, wait, TASK_INTERRUPTIBLE);
	set_bit(prio, spu_prio->bitmap);
}

static inline void spu_del_wq(wait_queue_head_t * wq, wait_queue_t * wait,
			      int prio)
{
	unsigned long flags;

	__set_current_state(TASK_RUNNING);

	spin_lock_irqsave(&wq->lock, flags);

	remove_wait_queue_locked(wq, wait);
	if (list_empty(&wq->task_list))
		clear_bit(prio, spu_prio->bitmap);

	spin_unlock_irqrestore(&wq->lock, flags);
}

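/*
 * spu_prio_wait - sleep until an SPU may be available.
 * Drops ctx->state_sema across schedule() so that whoever frees an
 * SPU can make progress; callers must recheck ctx->spu afterwards.
 */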
static void spu_prio_wait(struct spu_context *ctx, u64 flags)
{
	int prio = current->prio;
	wait_queue_head_t *wq = &spu_prio->waitq[prio];
	DEFINE_WAIT(wait);

	if (ctx->spu)
		return;

	spu_add_wq(wq, &wait, prio);

	if (!signal_pending(current)) {
		up_write(&ctx->state_sema);
		pr_debug("%s: pid=%d prio=%d\n", __FUNCTION__,
			 current->pid, current->prio);
		schedule();
		down_write(&ctx->state_sema);
	}

	spu_del_wq(wq, &wait, prio);
}

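/*
 * spu_prio_wakeup - wake exactly one waiter at the highest priority
 * level that currently has sleepers.
 */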
static void spu_prio_wakeup(void)
{
	int best = sched_find_first_bit(spu_prio->bitmap);
	if (best < MAX_PRIO) {
		wait_queue_head_t *wq = &spu_prio->waitq[best];
		wake_up_interruptible_nr(wq, 1);
	}
}

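/*
 * The per-node active_list/active_mutex pair tracks which SPUs are
 * currently running a context.  get_active_spu() removes an SPU from
 * its node's list and reports whether it was there; put_active_spu()
 * adds it back.
 */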
static int get_active_spu(struct spu *spu)
{
	int node = spu->node;
	struct spu *tmp;
	int rc = 0;

	mutex_lock(&spu_prio->active_mutex[node]);
	list_for_each_entry(tmp, &spu_prio->active_list[node], list) {
		if (tmp == spu) {
			list_del_init(&spu->list);
			rc = 1;
			break;
		}
	}
	mutex_unlock(&spu_prio->active_mutex[node]);
	return rc;
}

static void put_active_spu(struct spu *spu)
{
	int node = spu->node;

	mutex_lock(&spu_prio->active_mutex[node]);
	list_add_tail(&spu->list, &spu_prio->active_list[node]);
	mutex_unlock(&spu_prio->active_mutex[node]);
}

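/*
 * spu_get_idle - try to allocate an idle SPU, preferring the local
 * NUMA node and falling back to the other allowed nodes in order.
 */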
static struct spu *spu_get_idle(struct spu_context *ctx, u64 flags)
{
	struct spu *spu = NULL;
	int node = cpu_to_node(raw_smp_processor_id());
	int n;

	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(node))
			continue;
		spu = spu_alloc_node(node);
		if (spu)
			break;
	}
	return spu;
}

static inline struct spu *spu_get(struct spu_context *ctx, u64 flags)
{
	/* Future: spu_get_idle() if possible,
	 * otherwise try to preempt an active
	 * context.
	 */
	return spu_get_idle(ctx, flags);
}

/* The three externally callable interfaces
 * for the scheduler begin here.
 *
 *	spu_activate	- bind a context to SPU, waiting as needed.
 *	spu_deactivate	- unbind a context from its SPU.
 *	spu_yield	- yield an SPU if others are waiting.
 */

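/*
 * Locking: spu_activate and spu_deactivate run with ctx->state_sema
 * held for writing; spu_prio_wait temporarily drops it while
 * sleeping.  spu_yield takes the semaphore itself via
 * down_write_trylock.
 */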
int spu_activate(struct spu_context *ctx, u64 flags)
{
	struct spu *spu;
	int ret = 0;

	for (;;) {
		if (ctx->spu)
			return 0;
		spu = spu_get(ctx, flags);
		if (spu != NULL) {
			if (ctx->spu != NULL) {
				spu_free(spu);
				spu_prio_wakeup();
				break;
			}
			bind_context(spu, ctx);
			put_active_spu(spu);
			break;
		}
		spu_prio_wait(ctx, flags);
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			spu_prio_wakeup();
			break;
		}
	}
	return ret;
}

void spu_deactivate(struct spu_context *ctx)
{
	struct spu *spu;
	int needs_idle;

	spu = ctx->spu;
	if (!spu)
		return;
	needs_idle = get_active_spu(spu);
	unbind_context(spu, ctx);
	if (needs_idle) {
		spu_free(spu);
		spu_prio_wakeup();
	}
}

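/*
 * spu_yield - if other contexts are waiting for an SPU, deactivate
 * this one and yield the CPU; otherwise just mark the SPU as lowest
 * priority (MAX_PRIO).
 */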
void spu_yield(struct spu_context *ctx)
{
	struct spu *spu;
	int need_yield = 0;

	if (down_write_trylock(&ctx->state_sema)) {
		if ((spu = ctx->spu) != NULL) {
			int best = sched_find_first_bit(spu_prio->bitmap);
			if (best < MAX_PRIO) {
				pr_debug("%s: yielding SPU %d NODE %d\n",
					 __FUNCTION__, spu->number, spu->node);
				spu_deactivate(ctx);
				ctx->state = SPU_STATE_SAVED;
				need_yield = 1;
			} else {
				spu->prio = MAX_PRIO;
			}
		}
		up_write(&ctx->state_sema);
	}
	if (unlikely(need_yield))
		yield();
}

int __init spu_sched_init(void)
{
	int i;

	spu_prio = kzalloc(sizeof(struct spu_prio_array), GFP_KERNEL);
	if (!spu_prio) {
		printk(KERN_WARNING "%s: Unable to allocate priority queue.\n",
		       __FUNCTION__);
		return 1;
	}
	for (i = 0; i < MAX_PRIO; i++) {
		init_waitqueue_head(&spu_prio->waitq[i]);
		__clear_bit(i, spu_prio->bitmap);
	}
	__set_bit(MAX_PRIO, spu_prio->bitmap);
	for (i = 0; i < MAX_NUMNODES; i++) {
		mutex_init(&spu_prio->active_mutex[i]);
		INIT_LIST_HEAD(&spu_prio->active_list[i]);
	}
	return 0;
}

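/*
 * spu_sched_exit - module unload: return any still-active SPUs to
 * the free pool and release the priority array.
 */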
void __exit spu_sched_exit(void)
{
	struct spu *spu, *tmp;
	int node;

	for (node = 0; node < MAX_NUMNODES; node++) {
		mutex_lock(&spu_prio->active_mutex[node]);
		list_for_each_entry_safe(spu, tmp, &spu_prio->active_list[node],
					 list) {
			list_del_init(&spu->list);
			spu_free(spu);
		}
		mutex_unlock(&spu_prio->active_mutex[node]);
	}
	kfree(spu_prio);
}