/*
 * SPU file system -- SPU context management
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <asm/spu.h>
#include <asm/spu_csa.h>
#include "spufs.h"

Arnd Bergmann62632032006-10-04 17:26:15 +020030struct spu_context *alloc_spu_context(struct spu_gang *gang)
Arnd Bergmann67207b92005-11-15 15:53:48 -050031{
32 struct spu_context *ctx;
Jeremy Kerrc5c45912006-04-28 16:37:47 +080033 ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
Arnd Bergmann67207b92005-11-15 15:53:48 -050034 if (!ctx)
35 goto out;
Arnd Bergmann8b3d6662005-11-15 15:53:52 -050036 /* Binding to physical processor deferred
37 * until spu_activate().
Mark Nutter5473af02005-11-15 15:53:49 -050038 */
Mark Nutter5473af02005-11-15 15:53:49 -050039 spu_init_csa(&ctx->csa);
40 if (!ctx->csa.lscsa) {
Mark Nutter5473af02005-11-15 15:53:49 -050041 goto out_free;
42 }
Arnd Bergmann67207b92005-11-15 15:53:48 -050043 spin_lock_init(&ctx->mmio_lock);
Christoph Hellwig43c2bbd2007-04-23 21:08:07 +020044 spin_lock_init(&ctx->mapping_lock);
Arnd Bergmann67207b92005-11-15 15:53:48 -050045 kref_init(&ctx->kref);
Christoph Hellwig650f8b02007-02-13 21:36:50 +010046 mutex_init(&ctx->state_mutex);
Arnd Bergmann5ef82242006-01-04 20:31:24 +010047 init_MUTEX(&ctx->run_sema);
Arnd Bergmann8b3d6662005-11-15 15:53:52 -050048 init_waitqueue_head(&ctx->ibox_wq);
49 init_waitqueue_head(&ctx->wbox_wq);
Arnd Bergmann51104592005-12-05 22:52:25 -050050 init_waitqueue_head(&ctx->stop_wq);
Arnd Bergmanna33a7d72006-03-23 00:00:11 +010051 init_waitqueue_head(&ctx->mfc_wq);
Arnd Bergmann8b3d6662005-11-15 15:53:52 -050052 ctx->state = SPU_STATE_SAVED;
Arnd Bergmann8b3d6662005-11-15 15:53:52 -050053 ctx->ops = &spu_backing_ops;
54 ctx->owner = get_task_mm(current);
Arnd Bergmann62632032006-10-04 17:26:15 +020055 if (gang)
56 spu_gang_add_ctx(gang, ctx);
Christoph Hellwig52f04fc2007-02-13 21:54:27 +010057 ctx->rt_priority = current->rt_priority;
Christoph Hellwig2eb1b122007-02-13 21:54:29 +010058 ctx->policy = current->policy;
Christoph Hellwig83899982007-02-13 21:54:22 +010059 ctx->prio = current->prio;
Christoph Hellwig2eb1b122007-02-13 21:54:29 +010060 INIT_DELAYED_WORK(&ctx->sched_work, spu_sched_tick);
Arnd Bergmann67207b92005-11-15 15:53:48 -050061 goto out;
62out_free:
63 kfree(ctx);
64 ctx = NULL;
65out:
66 return ctx;
67}
68
69void destroy_spu_context(struct kref *kref)
70{
71 struct spu_context *ctx;
72 ctx = container_of(kref, struct spu_context, kref);
Christoph Hellwig650f8b02007-02-13 21:36:50 +010073 mutex_lock(&ctx->state_mutex);
Arnd Bergmann8b3d6662005-11-15 15:53:52 -050074 spu_deactivate(ctx);
Christoph Hellwig650f8b02007-02-13 21:36:50 +010075 mutex_unlock(&ctx->state_mutex);
Mark Nutter5473af02005-11-15 15:53:49 -050076 spu_fini_csa(&ctx->csa);
Arnd Bergmann62632032006-10-04 17:26:15 +020077 if (ctx->gang)
78 spu_gang_remove_ctx(ctx->gang, ctx);
Arnd Bergmann67207b92005-11-15 15:53:48 -050079 kfree(ctx);
80}
81
82struct spu_context * get_spu_context(struct spu_context *ctx)
83{
84 kref_get(&ctx->kref);
85 return ctx;
86}
87
88int put_spu_context(struct spu_context *ctx)
89{
90 return kref_put(&ctx->kref, &destroy_spu_context);
91}
92
Arnd Bergmann8b3d6662005-11-15 15:53:52 -050093/* give up the mm reference when the context is about to be destroyed */
94void spu_forget(struct spu_context *ctx)
95{
96 struct mm_struct *mm;
97 spu_acquire_saved(ctx);
98 mm = ctx->owner;
99 ctx->owner = NULL;
100 mmput(mm);
101 spu_release(ctx);
102}
Arnd Bergmann67207b92005-11-15 15:53:48 -0500103
Arnd Bergmann51104592005-12-05 22:52:25 -0500104void spu_unmap_mappings(struct spu_context *ctx)
Arnd Bergmann8b3d6662005-11-15 15:53:52 -0500105{
Mark Nutter6df10a82006-03-23 00:00:12 +0100106 if (ctx->local_store)
107 unmap_mapping_range(ctx->local_store, 0, LS_SIZE, 1);
108 if (ctx->mfc)
Benjamin Herrenschmidt17e0e272007-02-13 11:46:08 +1100109 unmap_mapping_range(ctx->mfc, 0, 0x1000, 1);
Mark Nutter6df10a82006-03-23 00:00:12 +0100110 if (ctx->cntl)
Benjamin Herrenschmidt17e0e272007-02-13 11:46:08 +1100111 unmap_mapping_range(ctx->cntl, 0, 0x1000, 1);
Mark Nutter6df10a82006-03-23 00:00:12 +0100112 if (ctx->signal1)
Benjamin Herrenschmidt17e0e272007-02-13 11:46:08 +1100113 unmap_mapping_range(ctx->signal1, 0, PAGE_SIZE, 1);
Mark Nutter6df10a82006-03-23 00:00:12 +0100114 if (ctx->signal2)
Benjamin Herrenschmidt17e0e272007-02-13 11:46:08 +1100115 unmap_mapping_range(ctx->signal2, 0, PAGE_SIZE, 1);
116 if (ctx->mss)
117 unmap_mapping_range(ctx->mss, 0, 0x1000, 1);
118 if (ctx->psmap)
119 unmap_mapping_range(ctx->psmap, 0, 0x20000, 1);
Arnd Bergmann8b3d6662005-11-15 15:53:52 -0500120}
121
Christoph Hellwig6a0641e52007-02-13 21:54:21 +0100122/**
123 * spu_acquire_exclusive - lock spu contex and protect against userspace access
124 * @ctx: spu contex to lock
125 *
126 * Note:
127 * Returns 0 and with the context locked on success
128 * Returns negative error and with the context _unlocked_ on failure.
129 */
Jeremy Kerr099814b2006-10-24 18:31:19 +0200130int spu_acquire_exclusive(struct spu_context *ctx)
131{
Christoph Hellwig6a0641e52007-02-13 21:54:21 +0100132 int ret = -EINVAL;
Jeremy Kerr099814b2006-10-24 18:31:19 +0200133
Christoph Hellwig6a0641e52007-02-13 21:54:21 +0100134 spu_acquire(ctx);
135 /*
136 * Context is about to be freed, so we can't acquire it anymore.
137 */
138 if (!ctx->owner)
139 goto out_unlock;
Jeremy Kerr099814b2006-10-24 18:31:19 +0200140
Arnd Bergmannee2d7342006-11-20 18:45:08 +0100141 if (ctx->state == SPU_STATE_SAVED) {
142 ret = spu_activate(ctx, 0);
143 if (ret)
Christoph Hellwig6a0641e52007-02-13 21:54:21 +0100144 goto out_unlock;
Arnd Bergmannee2d7342006-11-20 18:45:08 +0100145 } else {
Christoph Hellwig6a0641e52007-02-13 21:54:21 +0100146 /*
147 * We need to exclude userspace access to the context.
148 *
149 * To protect against memory access we invalidate all ptes
150 * and make sure the pagefault handlers block on the mutex.
151 */
Arnd Bergmannee2d7342006-11-20 18:45:08 +0100152 spu_unmap_mappings(ctx);
153 }
Jeremy Kerr099814b2006-10-24 18:31:19 +0200154
Christoph Hellwig6a0641e52007-02-13 21:54:21 +0100155 return 0;
156
157 out_unlock:
158 spu_release(ctx);
Arnd Bergmannee2d7342006-11-20 18:45:08 +0100159 return ret;
Jeremy Kerr099814b2006-10-24 18:31:19 +0200160}
161
Christoph Hellwig6a0641e52007-02-13 21:54:21 +0100162/**
163 * spu_acquire_runnable - lock spu contex and make sure it is in runnable state
164 * @ctx: spu contex to lock
165 *
166 * Note:
167 * Returns 0 and with the context locked on success
168 * Returns negative error and with the context _unlocked_ on failure.
169 */
Christoph Hellwig26bec672007-02-13 21:54:24 +0100170int spu_acquire_runnable(struct spu_context *ctx, unsigned long flags)
Arnd Bergmann8b3d6662005-11-15 15:53:52 -0500171{
Christoph Hellwig6a0641e52007-02-13 21:54:21 +0100172 int ret = -EINVAL;
Arnd Bergmann8b3d6662005-11-15 15:53:52 -0500173
Christoph Hellwig6a0641e52007-02-13 21:54:21 +0100174 spu_acquire(ctx);
Arnd Bergmann8b3d6662005-11-15 15:53:52 -0500175 if (ctx->state == SPU_STATE_SAVED) {
Christoph Hellwig6a0641e52007-02-13 21:54:21 +0100176 /*
177 * Context is about to be freed, so we can't acquire it anymore.
178 */
179 if (!ctx->owner)
180 goto out_unlock;
Christoph Hellwig26bec672007-02-13 21:54:24 +0100181 ret = spu_activate(ctx, flags);
Arnd Bergmann01062462006-01-04 20:31:25 +0100182 if (ret)
Christoph Hellwig6a0641e52007-02-13 21:54:21 +0100183 goto out_unlock;
Christoph Hellwig83899982007-02-13 21:54:22 +0100184 }
Arnd Bergmann8b3d6662005-11-15 15:53:52 -0500185
Christoph Hellwig6a0641e52007-02-13 21:54:21 +0100186 return 0;
Arnd Bergmann8b3d6662005-11-15 15:53:52 -0500187
Christoph Hellwig6a0641e52007-02-13 21:54:21 +0100188 out_unlock:
189 spu_release(ctx);
Arnd Bergmann8b3d6662005-11-15 15:53:52 -0500190 return ret;
191}
192
Christoph Hellwig6a0641e52007-02-13 21:54:21 +0100193/**
194 * spu_acquire_saved - lock spu contex and make sure it is in saved state
195 * @ctx: spu contex to lock
196 */
Arnd Bergmann8b3d6662005-11-15 15:53:52 -0500197void spu_acquire_saved(struct spu_context *ctx)
198{
Christoph Hellwig6a0641e52007-02-13 21:54:21 +0100199 spu_acquire(ctx);
200 if (ctx->state != SPU_STATE_SAVED)
Arnd Bergmann8b3d6662005-11-15 15:53:52 -0500201 spu_deactivate(ctx);
Arnd Bergmann8b3d6662005-11-15 15:53:52 -0500202}