Arnd Bergmann | 67207b9 | 2005-11-15 15:53:48 -0500 | [diff] [blame] | 1 | /* |
| 2 | * SPU file system -- SPU context management |
| 3 | * |
| 4 | * (C) Copyright IBM Deutschland Entwicklung GmbH 2005 |
| 5 | * |
| 6 | * Author: Arnd Bergmann <arndb@de.ibm.com> |
| 7 | * |
| 8 | * This program is free software; you can redistribute it and/or modify |
| 9 | * it under the terms of the GNU General Public License as published by |
| 10 | * the Free Software Foundation; either version 2, or (at your option) |
| 11 | * any later version. |
| 12 | * |
| 13 | * This program is distributed in the hope that it will be useful, |
| 14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 16 | * GNU General Public License for more details. |
| 17 | * |
| 18 | * You should have received a copy of the GNU General Public License |
| 19 | * along with this program; if not, write to the Free Software |
| 20 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. |
| 21 | */ |
| 22 | |
Arnd Bergmann | 8b3d666 | 2005-11-15 15:53:52 -0500 | [diff] [blame] | 23 | #include <linux/fs.h> |
| 24 | #include <linux/mm.h> |
Arnd Bergmann | 67207b9 | 2005-11-15 15:53:48 -0500 | [diff] [blame] | 25 | #include <linux/slab.h> |
| 26 | #include <asm/spu.h> |
Mark Nutter | 5473af0 | 2005-11-15 15:53:49 -0500 | [diff] [blame] | 27 | #include <asm/spu_csa.h> |
Arnd Bergmann | 67207b9 | 2005-11-15 15:53:48 -0500 | [diff] [blame] | 28 | #include "spufs.h" |
| 29 | |
Arnd Bergmann | 6263203 | 2006-10-04 17:26:15 +0200 | [diff] [blame] | 30 | struct spu_context *alloc_spu_context(struct spu_gang *gang) |
Arnd Bergmann | 67207b9 | 2005-11-15 15:53:48 -0500 | [diff] [blame] | 31 | { |
| 32 | struct spu_context *ctx; |
Jeremy Kerr | c5c4591 | 2006-04-28 16:37:47 +0800 | [diff] [blame] | 33 | ctx = kzalloc(sizeof *ctx, GFP_KERNEL); |
Arnd Bergmann | 67207b9 | 2005-11-15 15:53:48 -0500 | [diff] [blame] | 34 | if (!ctx) |
| 35 | goto out; |
Arnd Bergmann | 8b3d666 | 2005-11-15 15:53:52 -0500 | [diff] [blame] | 36 | /* Binding to physical processor deferred |
| 37 | * until spu_activate(). |
Mark Nutter | 5473af0 | 2005-11-15 15:53:49 -0500 | [diff] [blame] | 38 | */ |
Mark Nutter | 5473af0 | 2005-11-15 15:53:49 -0500 | [diff] [blame] | 39 | spu_init_csa(&ctx->csa); |
| 40 | if (!ctx->csa.lscsa) { |
Mark Nutter | 5473af0 | 2005-11-15 15:53:49 -0500 | [diff] [blame] | 41 | goto out_free; |
| 42 | } |
Arnd Bergmann | 67207b9 | 2005-11-15 15:53:48 -0500 | [diff] [blame] | 43 | spin_lock_init(&ctx->mmio_lock); |
| 44 | kref_init(&ctx->kref); |
Christoph Hellwig | 650f8b0 | 2007-02-13 21:36:50 +0100 | [diff] [blame] | 45 | mutex_init(&ctx->state_mutex); |
Arnd Bergmann | 5ef8224 | 2006-01-04 20:31:24 +0100 | [diff] [blame] | 46 | init_MUTEX(&ctx->run_sema); |
Arnd Bergmann | 8b3d666 | 2005-11-15 15:53:52 -0500 | [diff] [blame] | 47 | init_waitqueue_head(&ctx->ibox_wq); |
| 48 | init_waitqueue_head(&ctx->wbox_wq); |
Arnd Bergmann | 5110459 | 2005-12-05 22:52:25 -0500 | [diff] [blame] | 49 | init_waitqueue_head(&ctx->stop_wq); |
Arnd Bergmann | a33a7d7 | 2006-03-23 00:00:11 +0100 | [diff] [blame] | 50 | init_waitqueue_head(&ctx->mfc_wq); |
Arnd Bergmann | 8b3d666 | 2005-11-15 15:53:52 -0500 | [diff] [blame] | 51 | ctx->state = SPU_STATE_SAVED; |
Arnd Bergmann | 8b3d666 | 2005-11-15 15:53:52 -0500 | [diff] [blame] | 52 | ctx->ops = &spu_backing_ops; |
| 53 | ctx->owner = get_task_mm(current); |
Arnd Bergmann | 6263203 | 2006-10-04 17:26:15 +0200 | [diff] [blame] | 54 | if (gang) |
| 55 | spu_gang_add_ctx(gang, ctx); |
Arnd Bergmann | 67207b9 | 2005-11-15 15:53:48 -0500 | [diff] [blame] | 56 | goto out; |
| 57 | out_free: |
| 58 | kfree(ctx); |
| 59 | ctx = NULL; |
| 60 | out: |
| 61 | return ctx; |
| 62 | } |
| 63 | |
| 64 | void destroy_spu_context(struct kref *kref) |
| 65 | { |
| 66 | struct spu_context *ctx; |
| 67 | ctx = container_of(kref, struct spu_context, kref); |
Christoph Hellwig | 650f8b0 | 2007-02-13 21:36:50 +0100 | [diff] [blame] | 68 | mutex_lock(&ctx->state_mutex); |
Arnd Bergmann | 8b3d666 | 2005-11-15 15:53:52 -0500 | [diff] [blame] | 69 | spu_deactivate(ctx); |
Christoph Hellwig | 650f8b0 | 2007-02-13 21:36:50 +0100 | [diff] [blame] | 70 | mutex_unlock(&ctx->state_mutex); |
Mark Nutter | 5473af0 | 2005-11-15 15:53:49 -0500 | [diff] [blame] | 71 | spu_fini_csa(&ctx->csa); |
Arnd Bergmann | 6263203 | 2006-10-04 17:26:15 +0200 | [diff] [blame] | 72 | if (ctx->gang) |
| 73 | spu_gang_remove_ctx(ctx->gang, ctx); |
Arnd Bergmann | 67207b9 | 2005-11-15 15:53:48 -0500 | [diff] [blame] | 74 | kfree(ctx); |
| 75 | } |
| 76 | |
| 77 | struct spu_context * get_spu_context(struct spu_context *ctx) |
| 78 | { |
| 79 | kref_get(&ctx->kref); |
| 80 | return ctx; |
| 81 | } |
| 82 | |
| 83 | int put_spu_context(struct spu_context *ctx) |
| 84 | { |
| 85 | return kref_put(&ctx->kref, &destroy_spu_context); |
| 86 | } |
| 87 | |
/* give up the mm reference when the context is about to be destroyed */
void spu_forget(struct spu_context *ctx)
{
	struct mm_struct *mm;
	/* Force the context into the saved state (returns with the
	 * state mutex held) so nobody can race with us while we clear
	 * the owner pointer. */
	spu_acquire_saved(ctx);
	mm = ctx->owner;
	ctx->owner = NULL;
	/* Drop the reference taken by get_task_mm() in alloc_spu_context().
	 * NOTE(review): assumes owner is still set here, i.e. spu_forget()
	 * is called at most once per context -- confirm with callers. */
	mmput(mm);
	spu_release(ctx);
}
Arnd Bergmann | 67207b9 | 2005-11-15 15:53:48 -0500 | [diff] [blame] | 98 | |
/*
 * Invalidate all user-space mappings of the context's local store and
 * problem-state areas.  Subsequent user accesses will fault and go
 * through the page-fault handlers, which can then block until the
 * context is available again.
 */
void spu_unmap_mappings(struct spu_context *ctx)
{
	if (ctx->local_store)
		unmap_mapping_range(ctx->local_store, 0, LS_SIZE, 1);
	/* 0x1000 below: presumably one 4k page of registers per area --
	 * TODO confirm against the spufs mmap setup for each file. */
	if (ctx->mfc)
		unmap_mapping_range(ctx->mfc, 0, 0x1000, 1);
	if (ctx->cntl)
		unmap_mapping_range(ctx->cntl, 0, 0x1000, 1);
	if (ctx->signal1)
		unmap_mapping_range(ctx->signal1, 0, PAGE_SIZE, 1);
	if (ctx->signal2)
		unmap_mapping_range(ctx->signal2, 0, PAGE_SIZE, 1);
	if (ctx->mss)
		unmap_mapping_range(ctx->mss, 0, 0x1000, 1);
	/* 0x20000: size of the psmap area -- verify against the mmap
	 * code that establishes this mapping. */
	if (ctx->psmap)
		unmap_mapping_range(ctx->psmap, 0, 0x20000, 1);
}
| 116 | |
/**
 * spu_acquire_exclusive - lock spu context and protect against userspace access
 * @ctx: spu context to lock
 *
 * Note:
 * Returns 0 and with the context locked on success
 * Returns negative error and with the context _unlocked_ on failure.
 */
int spu_acquire_exclusive(struct spu_context *ctx)
{
	int ret = -EINVAL;

	spu_acquire(ctx);
	/*
	 * Context is about to be freed, so we can't acquire it anymore.
	 */
	if (!ctx->owner)
		goto out_unlock;

	if (ctx->state == SPU_STATE_SAVED) {
		/* A saved context cannot be reached from userspace
		 * mappings; activating it is enough. */
		ret = spu_activate(ctx, 0);
		if (ret)
			goto out_unlock;
	} else {
		/*
		 * We need to exclude userspace access to the context.
		 *
		 * To protect against memory access we invalidate all ptes
		 * and make sure the pagefault handlers block on the mutex.
		 */
		spu_unmap_mappings(ctx);
	}

	/* Success: return with the state mutex still held. */
	return 0;

out_unlock:
	spu_release(ctx);
	return ret;
}
| 156 | |
/**
 * spu_acquire_runnable - lock spu context and make sure it is in runnable state
 * @ctx: spu context to lock
 *
 * Note:
 * Returns 0 and with the context locked on success
 * Returns negative error and with the context _unlocked_ on failure.
 */
int spu_acquire_runnable(struct spu_context *ctx)
{
	int ret = -EINVAL;

	spu_acquire(ctx);
	if (ctx->state == SPU_STATE_SAVED) {
		/*
		 * Context is about to be freed, so we can't acquire it anymore.
		 */
		if (!ctx->owner)
			goto out_unlock;
		ret = spu_activate(ctx, 0);
		if (ret)
			goto out_unlock;
	} else
		/* Already running: propagate the caller's scheduling
		 * priority to the physical spu. */
		ctx->spu->prio = current->prio;

	/* Success: return with the state mutex still held. */
	return 0;

out_unlock:
	spu_release(ctx);
	return ret;
}
| 188 | |
/**
 * spu_acquire_saved - lock spu context and make sure it is in saved state
 * @ctx: spu context to lock
 *
 * Returns with the state mutex held; callers release it via
 * spu_release().
 */
void spu_acquire_saved(struct spu_context *ctx)
{
	spu_acquire(ctx);
	/* Deactivate (save off the physical spu) if currently running. */
	if (ctx->state != SPU_STATE_SAVED)
		spu_deactivate(ctx);
}