/* drm_dma.c -- DMA IOCTL and function support -*- linux-c -*-
 * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *    Gareth Hughes <gareth@valinux.com>
 */


/* Gamma-specific code pulled from drm_dma.h:
 */
| 35 | |
| 36 | void DRM(clear_next_buffer)(drm_device_t *dev) |
| 37 | { |
| 38 | drm_device_dma_t *dma = dev->dma; |
| 39 | |
| 40 | dma->next_buffer = NULL; |
| 41 | if (dma->next_queue && !DRM_BUFCOUNT(&dma->next_queue->waitlist)) { |
| 42 | wake_up_interruptible(&dma->next_queue->flush_queue); |
| 43 | } |
| 44 | dma->next_queue = NULL; |
| 45 | } |
| 46 | |
/*
 * DRM(select_queue) -- decide which context's queue should be serviced
 * next by the DMA dispatcher.
 *
 * Selection policy, in order:
 *   1. the kernel context, if it has "while locked" buffers waiting;
 *   2. the last-run context, if its time slice has not expired and it
 *      still has buffers waiting;
 *   3. round-robin from just past the last-checked queue, wrapping to
 *      the start, picking the first queue with buffers waiting.
 *
 * \param dev      the DRM device (checked for NULL).
 * \param wrapper  timer callback used to re-run selection when the
 *                 current context's time slice expires.
 *
 * \return the chosen context number, or -1 when there is nothing to do,
 * the device/queues are not ready, or a timer was armed to retry later.
 */
int DRM(select_queue)(drm_device_t *dev, void (*wrapper)(unsigned long))
{
	int i;
	int candidate = -1;
	/* NOTE(review): jiffies is unsigned long; storing it in an int
	   truncates on 64-bit and mixes signedness in the comparisons
	   below -- confirm dev->last_switch's type before changing. */
	int j = jiffies;

	if (!dev) {
		DRM_ERROR("No device\n");
		return -1;
	}
	if (!dev->queuelist || !dev->queuelist[DRM_KERNEL_CONTEXT]) {
		/* This only happens between the time the
		   interrupt is initialized and the time
		   the queues are initialized. */
		return -1;
	}

	/* Doing "while locked" DMA? */
	if (DRM_WAITCOUNT(dev, DRM_KERNEL_CONTEXT)) {
		return DRM_KERNEL_CONTEXT;
	}

	/* If there are buffers on the last_context
	   queue, and we have not been executing
	   this context very long, continue to
	   execute this context. */
	if (dev->last_switch <= j
	    && dev->last_switch + DRM_TIME_SLICE > j
	    && DRM_WAITCOUNT(dev, dev->last_context)) {
		return dev->last_context;
	}

	/* Otherwise, find a candidate */
	/* First pass: scan forward from the queue after the one checked
	   last time, so service rotates fairly between contexts. */
	for (i = dev->last_checked + 1; i < dev->queue_count; i++) {
		if (DRM_WAITCOUNT(dev, i)) {
			candidate = dev->last_checked = i;
			break;
		}
	}

	/* Second pass: wrap around and scan from the beginning. */
	if (candidate < 0) {
		for (i = 0; i < dev->queue_count; i++) {
			if (DRM_WAITCOUNT(dev, i)) {
				candidate = dev->last_checked = i;
				break;
			}
		}
	}

	/* A different context was chosen while the current one still owns
	   its time slice: defer the switch by arming (or leaving armed)
	   a timer that fires when the slice expires, and report "nothing
	   to run now". */
	if (wrapper
	    && candidate >= 0
	    && candidate != dev->last_context
	    && dev->last_switch <= j
	    && dev->last_switch + DRM_TIME_SLICE > j) {
		if (dev->timer.expires != dev->last_switch + DRM_TIME_SLICE) {
			del_timer(&dev->timer);
			dev->timer.function = wrapper;
			dev->timer.data = (unsigned long)dev;
			dev->timer.expires = dev->last_switch+DRM_TIME_SLICE;
			add_timer(&dev->timer);
		}
		return -1;
	}

	return candidate;
}
| 113 | |
| 114 | |
| 115 | int DRM(dma_enqueue)(struct file *filp, drm_dma_t *d) |
| 116 | { |
| 117 | drm_file_t *priv = filp->private_data; |
| 118 | drm_device_t *dev = priv->dev; |
| 119 | int i; |
| 120 | drm_queue_t *q; |
| 121 | drm_buf_t *buf; |
| 122 | int idx; |
| 123 | int while_locked = 0; |
| 124 | drm_device_dma_t *dma = dev->dma; |
| 125 | int *ind; |
| 126 | int err; |
| 127 | DECLARE_WAITQUEUE(entry, current); |
| 128 | |
| 129 | DRM_DEBUG("%d\n", d->send_count); |
| 130 | |
| 131 | if (d->flags & _DRM_DMA_WHILE_LOCKED) { |
| 132 | int context = dev->lock.hw_lock->lock; |
| 133 | |
| 134 | if (!_DRM_LOCK_IS_HELD(context)) { |
| 135 | DRM_ERROR("No lock held during \"while locked\"" |
| 136 | " request\n"); |
| 137 | return -EINVAL; |
| 138 | } |
| 139 | if (d->context != _DRM_LOCKING_CONTEXT(context) |
| 140 | && _DRM_LOCKING_CONTEXT(context) != DRM_KERNEL_CONTEXT) { |
| 141 | DRM_ERROR("Lock held by %d while %d makes" |
| 142 | " \"while locked\" request\n", |
| 143 | _DRM_LOCKING_CONTEXT(context), |
| 144 | d->context); |
| 145 | return -EINVAL; |
| 146 | } |
| 147 | q = dev->queuelist[DRM_KERNEL_CONTEXT]; |
| 148 | while_locked = 1; |
| 149 | } else { |
| 150 | q = dev->queuelist[d->context]; |
| 151 | } |
| 152 | |
| 153 | |
| 154 | atomic_inc(&q->use_count); |
| 155 | if (atomic_read(&q->block_write)) { |
| 156 | add_wait_queue(&q->write_queue, &entry); |
| 157 | atomic_inc(&q->block_count); |
| 158 | for (;;) { |
| 159 | current->state = TASK_INTERRUPTIBLE; |
| 160 | if (!atomic_read(&q->block_write)) break; |
| 161 | schedule(); |
| 162 | if (signal_pending(current)) { |
| 163 | atomic_dec(&q->use_count); |
| 164 | remove_wait_queue(&q->write_queue, &entry); |
| 165 | return -EINTR; |
| 166 | } |
| 167 | } |
| 168 | atomic_dec(&q->block_count); |
| 169 | current->state = TASK_RUNNING; |
| 170 | remove_wait_queue(&q->write_queue, &entry); |
| 171 | } |
| 172 | |
| 173 | ind = DRM(alloc)(d->send_count * sizeof(int), DRM_MEM_DRIVER); |
| 174 | if (!ind) |
| 175 | return -ENOMEM; |
| 176 | |
| 177 | if (copy_from_user(ind, d->send_indices, d->send_count * sizeof(int))) { |
| 178 | err = -EFAULT; |
| 179 | goto out; |
| 180 | } |
| 181 | |
| 182 | err = -EINVAL; |
| 183 | for (i = 0; i < d->send_count; i++) { |
| 184 | idx = ind[i]; |
| 185 | if (idx < 0 || idx >= dma->buf_count) { |
| 186 | DRM_ERROR("Index %d (of %d max)\n", |
| 187 | ind[i], dma->buf_count - 1); |
| 188 | goto out; |
| 189 | } |
| 190 | buf = dma->buflist[ idx ]; |
| 191 | if (buf->filp != filp) { |
| 192 | DRM_ERROR("Process %d using buffer not owned\n", |
| 193 | current->pid); |
| 194 | goto out; |
| 195 | } |
| 196 | if (buf->list != DRM_LIST_NONE) { |
| 197 | DRM_ERROR("Process %d using buffer %d on list %d\n", |
| 198 | current->pid, buf->idx, buf->list); |
| 199 | goto out; |
| 200 | } |
| 201 | buf->used = ind[i]; |
| 202 | buf->while_locked = while_locked; |
| 203 | buf->context = d->context; |
| 204 | if (!buf->used) { |
| 205 | DRM_ERROR("Queueing 0 length buffer\n"); |
| 206 | } |
| 207 | if (buf->pending) { |
| 208 | DRM_ERROR("Queueing pending buffer:" |
| 209 | " buffer %d, offset %d\n", |
| 210 | ind[i], i); |
| 211 | goto out; |
| 212 | } |
| 213 | if (buf->waiting) { |
| 214 | DRM_ERROR("Queueing waiting buffer:" |
| 215 | " buffer %d, offset %d\n", |
| 216 | ind[i], i); |
| 217 | goto out; |
| 218 | } |
| 219 | buf->waiting = 1; |
| 220 | if (atomic_read(&q->use_count) == 1 |
| 221 | || atomic_read(&q->finalization)) { |
| 222 | DRM(free_buffer)(dev, buf); |
| 223 | } else { |
| 224 | DRM(waitlist_put)(&q->waitlist, buf); |
| 225 | atomic_inc(&q->total_queued); |
| 226 | } |
| 227 | } |
| 228 | atomic_dec(&q->use_count); |
| 229 | |
| 230 | return 0; |
| 231 | |
| 232 | out: |
| 233 | DRM(free)(ind, d->send_count * sizeof(int), DRM_MEM_DRIVER); |
| 234 | atomic_dec(&q->use_count); |
| 235 | return err; |
| 236 | } |
| 237 | |
| 238 | static int DRM(dma_get_buffers_of_order)(struct file *filp, drm_dma_t *d, |
| 239 | int order) |
| 240 | { |
| 241 | drm_file_t *priv = filp->private_data; |
| 242 | drm_device_t *dev = priv->dev; |
| 243 | int i; |
| 244 | drm_buf_t *buf; |
| 245 | drm_device_dma_t *dma = dev->dma; |
| 246 | |
| 247 | for (i = d->granted_count; i < d->request_count; i++) { |
| 248 | buf = DRM(freelist_get)(&dma->bufs[order].freelist, |
| 249 | d->flags & _DRM_DMA_WAIT); |
| 250 | if (!buf) break; |
| 251 | if (buf->pending || buf->waiting) { |
| 252 | DRM_ERROR("Free buffer %d in use: filp %p (w%d, p%d)\n", |
| 253 | buf->idx, |
| 254 | buf->filp, |
| 255 | buf->waiting, |
| 256 | buf->pending); |
| 257 | } |
| 258 | buf->filp = filp; |
| 259 | if (copy_to_user(&d->request_indices[i], |
| 260 | &buf->idx, |
| 261 | sizeof(buf->idx))) |
| 262 | return -EFAULT; |
| 263 | |
| 264 | if (copy_to_user(&d->request_sizes[i], |
| 265 | &buf->total, |
| 266 | sizeof(buf->total))) |
| 267 | return -EFAULT; |
| 268 | |
| 269 | ++d->granted_count; |
| 270 | } |
| 271 | return 0; |
| 272 | } |
| 273 | |
| 274 | |
| 275 | int DRM(dma_get_buffers)(struct file *filp, drm_dma_t *dma) |
| 276 | { |
| 277 | int order; |
| 278 | int retcode = 0; |
| 279 | int tmp_order; |
| 280 | |
| 281 | order = DRM(order)(dma->request_size); |
| 282 | |
| 283 | dma->granted_count = 0; |
| 284 | retcode = DRM(dma_get_buffers_of_order)(filp, dma, order); |
| 285 | |
| 286 | if (dma->granted_count < dma->request_count |
| 287 | && (dma->flags & _DRM_DMA_SMALLER_OK)) { |
| 288 | for (tmp_order = order - 1; |
| 289 | !retcode |
| 290 | && dma->granted_count < dma->request_count |
| 291 | && tmp_order >= DRM_MIN_ORDER; |
| 292 | --tmp_order) { |
| 293 | |
| 294 | retcode = DRM(dma_get_buffers_of_order)(filp, dma, |
| 295 | tmp_order); |
| 296 | } |
| 297 | } |
| 298 | |
| 299 | if (dma->granted_count < dma->request_count |
| 300 | && (dma->flags & _DRM_DMA_LARGER_OK)) { |
| 301 | for (tmp_order = order + 1; |
| 302 | !retcode |
| 303 | && dma->granted_count < dma->request_count |
| 304 | && tmp_order <= DRM_MAX_ORDER; |
| 305 | ++tmp_order) { |
| 306 | |
| 307 | retcode = DRM(dma_get_buffers_of_order)(filp, dma, |
| 308 | tmp_order); |
| 309 | } |
| 310 | } |
| 311 | return 0; |
| 312 | } |
| 313 | |