/**
 * \file drm_bufs.c
 * Generic buffer template
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/vmalloc.h>
#include "drmP.h"

unsigned long drm_get_resource_start(drm_device_t *dev, unsigned int resource)
{
	return pci_resource_start(dev->pdev, resource);
}
EXPORT_SYMBOL(drm_get_resource_start);

unsigned long drm_get_resource_len(drm_device_t *dev, unsigned int resource)
{
	return pci_resource_len(dev->pdev, resource);
}
EXPORT_SYMBOL(drm_get_resource_len);

static drm_local_map_t *drm_find_matching_map(drm_device_t *dev,
					      drm_local_map_t *map)
{
	struct list_head *list;

	list_for_each(list, &dev->maplist->head) {
		drm_map_list_t *entry = list_entry(list, drm_map_list_t, head);
		if (entry->map && map->type == entry->map->type &&
		    entry->map->offset == map->offset) {
			return entry->map;
		}
	}

	return NULL;
}

#ifdef CONFIG_COMPAT
/*
 * Used to allocate 32-bit handles for _DRM_SHM regions
 * The 0x10000000 value is chosen to be out of the way of
 * FB/register and GART physical addresses.
 */
static unsigned int map32_handle = 0x10000000;
#endif

/**
 * Set up a range of memory that is available for mapping by a non-root
 * process.
 *
 * \param dev DRM device.
 * \param offset offset of the region, interpreted according to the map type.
 * \param size size of the region.
 * \param type memory map type.
 * \param flags memory map flags.
 * \param map_ptr on success, set to point at the new (or matching existing) map.
 * \return zero on success or a negative value on error.
 *
 * Adjusts the memory offset to its absolute value according to the mapping
 * type. Adds the map to the map list drm_device::maplist. Adds MTRRs where
 * applicable and if supported by the kernel.
 */
int drm_addmap(drm_device_t * dev, unsigned int offset,
	       unsigned int size, drm_map_type_t type,
	       drm_map_flags_t flags, drm_local_map_t ** map_ptr)
{
	drm_map_t *map;
	drm_map_list_t *list;
	drm_dma_handle_t *dmah;
	drm_local_map_t *found_map;

	map = drm_alloc( sizeof(*map), DRM_MEM_MAPS );
	if ( !map )
		return -ENOMEM;

	map->offset = offset;
	map->size = size;
	map->flags = flags;
	map->type = type;

	/* Only allow shared memory to be removable since we only keep enough
	 * book keeping information about shared memory to allow for removal
	 * when processes fork.
	 */
	if ( (map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM ) {
		drm_free( map, sizeof(*map), DRM_MEM_MAPS );
		return -EINVAL;
	}
	DRM_DEBUG( "offset = 0x%08lx, size = 0x%08lx, type = %d\n",
		   map->offset, map->size, map->type );
	if ( (map->offset & (~PAGE_MASK)) || (map->size & (~PAGE_MASK)) ) {
		drm_free( map, sizeof(*map), DRM_MEM_MAPS );
		return -EINVAL;
	}
	map->mtrr = -1;
	map->handle = NULL;

	switch ( map->type ) {
	case _DRM_REGISTERS:
	case _DRM_FRAME_BUFFER:
#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__)
		if ( map->offset + map->size < map->offset ||
		     map->offset < virt_to_phys(high_memory) ) {
			drm_free( map, sizeof(*map), DRM_MEM_MAPS );
			return -EINVAL;
		}
#endif
#ifdef __alpha__
		map->offset += dev->hose->mem_space->start;
#endif
		/* Some drivers preinitialize some maps, without the X Server
		 * needing to be aware of it. Therefore, we just return success
		 * when the server tries to create a duplicate map.
		 */
		found_map = drm_find_matching_map(dev, map);
		if (found_map != NULL) {
			if (found_map->size != map->size) {
				DRM_DEBUG("Matching maps of type %d with "
					  "mismatched sizes, (%ld vs %ld)\n",
					  map->type, map->size, found_map->size);
				found_map->size = map->size;
			}

			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			*map_ptr = found_map;
			return 0;
		}

		if (drm_core_has_MTRR(dev)) {
			if ( map->type == _DRM_FRAME_BUFFER ||
			     (map->flags & _DRM_WRITE_COMBINING) ) {
				map->mtrr = mtrr_add( map->offset, map->size,
						      MTRR_TYPE_WRCOMB, 1 );
			}
		}
		if (map->type == _DRM_REGISTERS)
			map->handle = drm_ioremap( map->offset, map->size,
						   dev );
		break;

	case _DRM_SHM:
		map->handle = vmalloc_32(map->size);
		DRM_DEBUG( "%lu %d %p\n",
			   map->size, drm_order( map->size ), map->handle );
		if ( !map->handle ) {
			drm_free( map, sizeof(*map), DRM_MEM_MAPS );
			return -ENOMEM;
		}
		map->offset = (unsigned long)map->handle;
		if ( map->flags & _DRM_CONTAINS_LOCK ) {
			/* Prevent a 2nd X Server from creating a 2nd lock */
			if (dev->lock.hw_lock != NULL) {
				vfree( map->handle );
				drm_free( map, sizeof(*map), DRM_MEM_MAPS );
				return -EBUSY;
			}
			dev->sigdata.lock =
			dev->lock.hw_lock = map->handle; /* Pointer to lock */
		}
		break;
	case _DRM_AGP:
		if (drm_core_has_AGP(dev)) {
#ifdef __alpha__
			map->offset += dev->hose->mem_space->start;
#endif
			map->offset += dev->agp->base;
			map->mtrr = dev->agp->agp_mtrr; /* for getmap */
		}
		break;
	case _DRM_SCATTER_GATHER:
		if (!dev->sg) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -EINVAL;
		}
		map->offset += dev->sg->handle;
		break;
	case _DRM_CONSISTENT:
		/* dma_addr_t is 64-bit on i386 with CONFIG_HIGHMEM64G.
		 * Since we're limiting the address to 2^32-1 (or less),
		 * casting it down to 32 bits is no problem, but we
		 * need to point to a 64-bit variable first. */
		dmah = drm_pci_alloc(dev, map->size, map->size, 0xffffffffUL);
		if (!dmah) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -ENOMEM;
		}
		map->handle = dmah->vaddr;
		map->offset = (unsigned long)dmah->busaddr;
		kfree(dmah);
		break;
	default:
		drm_free( map, sizeof(*map), DRM_MEM_MAPS );
		return -EINVAL;
	}

	list = drm_alloc(sizeof(*list), DRM_MEM_MAPS);
	if(!list) {
		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		return -EINVAL;
	}
	memset(list, 0, sizeof(*list));
	list->map = map;

	down(&dev->struct_sem);
	list_add(&list->head, &dev->maplist->head);
#ifdef CONFIG_COMPAT
	/* Assign a 32-bit handle for _DRM_SHM mappings */
	/* We do it here so that dev->struct_sem protects the increment */
	if (map->type == _DRM_SHM)
		map->offset = map32_handle += PAGE_SIZE;
#endif
	up(&dev->struct_sem);

	*map_ptr = map;
	return 0;
}
EXPORT_SYMBOL(drm_addmap);
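
/*
 * Example (illustrative only, not called from this file): a driver's
 * initialization hook might use the resource helpers above to map its
 * register BAR. "dev_priv" is a hypothetical driver-private structure:
 *
 *	drm_local_map_t *regs;
 *	int ret = drm_addmap(dev,
 *			     drm_get_resource_start(dev, 0),
 *			     drm_get_resource_len(dev, 0),
 *			     _DRM_REGISTERS, _DRM_READ_ONLY, &regs);
 *	if (ret)
 *		return ret;
 *	dev_priv->mmio = regs;
 */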

int drm_addmap_ioctl(struct inode *inode, struct file *filp,
		     unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_map_t map;
	drm_map_t *map_ptr;
	drm_map_t __user *argp = (void __user *)arg;
	int err;

	if (!(filp->f_mode & 3))
		return -EACCES;	/* Require read/write */

	if (copy_from_user(&map, argp, sizeof(map))) {
		return -EFAULT;
	}

	err = drm_addmap(dev, map.offset, map.size, map.type, map.flags,
			 &map_ptr);

	if (err) {
		return err;
	}

	if (copy_to_user(argp, map_ptr, sizeof(*map_ptr)))
		return -EFAULT;
	if (map_ptr->type != _DRM_SHM) {
		if (copy_to_user(&argp->handle, &map_ptr->offset,
				 sizeof(map_ptr->offset)))
			return -EFAULT;
	}
	return 0;
}


/**
 * Remove a map private from list and deallocate resources if the mapping
 * isn't in use.
 *
 * \param dev DRM device.
 * \param map map to remove.
 * \return zero on success or a negative value on error.
 *
 * Searches for the map on drm_device::maplist, removes it from the list,
 * sees if it is being used, and frees any associated resources (such as
 * MTRRs) if it is not in use.
 *
 * \sa drm_addmap
 */
int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map)
{
	struct list_head *list;
	drm_map_list_t *r_list = NULL;
	drm_dma_handle_t dmah;

	/* Find the list entry for the map and remove it */
	list_for_each(list, &dev->maplist->head) {
		r_list = list_entry(list, drm_map_list_t, head);

		if (r_list->map == map) {
			list_del(list);
			drm_free(list, sizeof(*list), DRM_MEM_MAPS);
			break;
		}
	}

	/* List has wrapped around to the head pointer, or it's empty and we
	 * didn't find anything.
	 */
	if (list == (&dev->maplist->head)) {
		return -EINVAL;
	}

	switch (map->type) {
	case _DRM_REGISTERS:
		drm_ioremapfree(map->handle, map->size, dev);
		/* FALLTHROUGH */
	case _DRM_FRAME_BUFFER:
		if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
			int retcode;
			retcode = mtrr_del(map->mtrr, map->offset,
					   map->size);
			DRM_DEBUG("mtrr_del=%d\n", retcode);
		}
		break;
	case _DRM_SHM:
		vfree(map->handle);
		break;
	case _DRM_AGP:
	case _DRM_SCATTER_GATHER:
		break;
	case _DRM_CONSISTENT:
		dmah.vaddr = map->handle;
		dmah.busaddr = map->offset;
		dmah.size = map->size;
		__drm_pci_free(dev, &dmah);
		break;
	}
	drm_free(map, sizeof(*map), DRM_MEM_MAPS);

	return 0;
}
EXPORT_SYMBOL(drm_rmmap_locked);

int drm_rmmap(drm_device_t *dev, drm_local_map_t *map)
{
	int ret;

	down(&dev->struct_sem);
	ret = drm_rmmap_locked(dev, map);
	up(&dev->struct_sem);

	return ret;
}
EXPORT_SYMBOL(drm_rmmap);
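
/*
 * Teardown is the mirror image: a driver that created a map as in the
 * sketch above would release it with (illustrative):
 *
 *	drm_rmmap(dev, dev_priv->mmio);
 *
 * Maps left behind are reclaimed anyway at last close, as the comment
 * below explains.
 */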

/* The rmmap ioctl appears to be unnecessary. All mappings are torn down on
 * the last close of the device, and this is necessary for cleanup when things
 * exit uncleanly. Therefore, having userland manually remove mappings seems
 * like a pointless exercise since they're going away anyway.
 *
 * One use case might be after addmap is allowed for normal users for SHM and
 * gets used by drivers that the server doesn't need to care about. This seems
 * unlikely.
 */
int drm_rmmap_ioctl(struct inode *inode, struct file *filp,
		    unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_map_t request;
	drm_local_map_t *map = NULL;
	struct list_head *list;
	int ret;

	if (copy_from_user(&request, (drm_map_t __user *)arg, sizeof(request))) {
		return -EFAULT;
	}

	down(&dev->struct_sem);
	list_for_each(list, &dev->maplist->head) {
		drm_map_list_t *r_list = list_entry(list, drm_map_list_t, head);

		if (r_list->map &&
		    r_list->map->handle == request.handle &&
		    r_list->map->flags & _DRM_REMOVABLE) {
			map = r_list->map;
			break;
		}
	}

	/* List has wrapped around to the head pointer, or it's empty and we
	 * didn't find anything.
	 */
	if (list == (&dev->maplist->head)) {
		up(&dev->struct_sem);
		return -EINVAL;
	}

	if (!map) {
		up(&dev->struct_sem);	/* don't leak struct_sem on this path */
		return -EINVAL;
	}

	/* Register and framebuffer maps are permanent */
	if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
		up(&dev->struct_sem);
		return 0;
	}

	ret = drm_rmmap_locked(dev, map);

	up(&dev->struct_sem);

	return ret;
}

/**
 * Cleanup after an error on one of the addbufs() functions.
 *
 * \param dev DRM device.
 * \param entry buffer entry where the error occurred.
 *
 * Frees any pages and buffers associated with the given entry.
 */
static void drm_cleanup_buf_error(drm_device_t *dev, drm_buf_entry_t *entry)
{
	int i;

	if (entry->seg_count) {
		for (i = 0; i < entry->seg_count; i++) {
			if (entry->seglist[i]) {
				drm_free_pages(entry->seglist[i],
					       entry->page_order,
					       DRM_MEM_DMA);
			}
		}
		drm_free(entry->seglist,
			 entry->seg_count *
			 sizeof(*entry->seglist),
			 DRM_MEM_SEGS);

		entry->seg_count = 0;
	}

	if (entry->buf_count) {
		for (i = 0; i < entry->buf_count; i++) {
			if (entry->buflist[i].dev_private) {
				drm_free(entry->buflist[i].dev_private,
					 entry->buflist[i].dev_priv_size,
					 DRM_MEM_BUFS);
			}
		}
		drm_free(entry->buflist,
			 entry->buf_count *
			 sizeof(*entry->buflist),
			 DRM_MEM_BUFS);

		entry->buf_count = 0;
	}
}

#if __OS_HAS_AGP
/**
 * Add AGP buffers for DMA transfers.
 *
 * \param dev drm_device_t to which the buffers are to be added.
 * \param request pointer to a drm_buf_desc_t describing the request.
 * \return zero on success or a negative number on failure.
 *
 * After some sanity checks creates a drm_buf structure for each buffer and
 * reallocates the buffer list of the same size order to accommodate the new
 * buffers.
 */
int drm_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	if ( !dma ) return -EINVAL;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
		? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = dev->agp->base + request->agp_start;

	DRM_DEBUG( "count: %d\n", count );
	DRM_DEBUG( "order: %d\n", order );
	DRM_DEBUG( "size: %d\n", size );
	DRM_DEBUG( "agp_offset: %lu\n", agp_offset );
	DRM_DEBUG( "alignment: %d\n", alignment );
	DRM_DEBUG( "page_order: %d\n", page_order );
	DRM_DEBUG( "total: %d\n", total );

	if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
	if ( dev->queue_count ) return -EBUSY; /* Not while in use */

	spin_lock( &dev->count_lock );
	if ( dev->buf_use ) {
		spin_unlock( &dev->count_lock );
		return -EBUSY;
	}
	atomic_inc( &dev->buf_alloc );
	spin_unlock( &dev->count_lock );

	down( &dev->struct_sem );
	entry = &dma->bufs[order];
	if ( entry->buf_count ) {
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -ENOMEM; /* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -EINVAL;
	}

	entry->buflist = drm_alloc( count * sizeof(*entry->buflist),
				    DRM_MEM_BUFS );
	if ( !entry->buflist ) {
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -ENOMEM;
	}
	memset( entry->buflist, 0, count * sizeof(*entry->buflist) );

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while ( entry->buf_count < count ) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head( &buf->dma_wait );
		buf->filp = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = drm_alloc( buf->dev_priv_size,
					      DRM_MEM_BUFS );
		if(!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev,entry);
			up( &dev->struct_sem );
			atomic_dec( &dev->buf_alloc );
			return -ENOMEM;
		}
		memset( buf->dev_private, 0, buf->dev_priv_size );

		DRM_DEBUG( "buffer %d @ %p\n",
			   entry->buf_count, buf->address );

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG( "byte_count: %d\n", byte_count );

	temp_buflist = drm_realloc( dma->buflist,
				    dma->buf_count * sizeof(*dma->buflist),
				    (dma->buf_count + entry->buf_count)
				    * sizeof(*dma->buflist),
				    DRM_MEM_BUFS );
	if(!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev,entry);
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for ( i = 0 ; i < entry->buf_count ; i++ ) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
	DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );

	up( &dev->struct_sem );

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_AGP;

	atomic_dec( &dev->buf_alloc );
	return 0;
}
EXPORT_SYMBOL(drm_addbufs_agp);
#endif /* __OS_HAS_AGP */
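
/*
 * Example (illustrative): a caller of drm_addbufs_agp() fills in a
 * drm_buf_desc_t request first; the sizes and the agp_buffer_offset
 * variable below are hypothetical:
 *
 *	drm_buf_desc_t req;
 *	memset(&req, 0, sizeof(req));
 *	req.count = 64;				   // ask for 64 buffers...
 *	req.size = 65536;			   // ...of 64 KB each
 *	req.flags = _DRM_AGP_BUFFER | _DRM_PAGE_ALIGN;
 *	req.agp_start = agp_buffer_offset;	   // offset into the AGP aperture
 *	ret = drm_addbufs_agp(dev, &req);
 *	// on success req.count/req.size report what was actually allocated
 */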

int drm_addbufs_pci(drm_device_t *dev, drm_buf_desc_t *request)
{
	drm_device_dma_t *dma = dev->dma;
	int count;
	int order;
	int size;
	int total;
	int page_order;
	drm_buf_entry_t *entry;
	unsigned long page;
	drm_buf_t *buf;
	int alignment;
	unsigned long offset;
	int i;
	int byte_count;
	int page_count;
	unsigned long *temp_pagelist;
	drm_buf_t **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_PCI_DMA)) return -EINVAL;
	if ( !dma ) return -EINVAL;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	DRM_DEBUG( "count=%d, size=%d (%d), order=%d, queue_count=%d\n",
		   request->count, request->size, size,
		   order, dev->queue_count );

	if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
	if ( dev->queue_count ) return -EBUSY; /* Not while in use */

	alignment = (request->flags & _DRM_PAGE_ALIGN)
		? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	spin_lock( &dev->count_lock );
	if ( dev->buf_use ) {
		spin_unlock( &dev->count_lock );
		return -EBUSY;
	}
	atomic_inc( &dev->buf_alloc );
	spin_unlock( &dev->count_lock );

	down( &dev->struct_sem );
	entry = &dma->bufs[order];
	if ( entry->buf_count ) {
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -ENOMEM; /* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -EINVAL;
	}

	entry->buflist = drm_alloc( count * sizeof(*entry->buflist),
				    DRM_MEM_BUFS );
	if ( !entry->buflist ) {
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -ENOMEM;
	}
	memset( entry->buflist, 0, count * sizeof(*entry->buflist) );

	entry->seglist = drm_alloc( count * sizeof(*entry->seglist),
				    DRM_MEM_SEGS );
	if ( !entry->seglist ) {
		drm_free( entry->buflist,
			  count * sizeof(*entry->buflist),
			  DRM_MEM_BUFS );
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -ENOMEM;
	}
	memset( entry->seglist, 0, count * sizeof(*entry->seglist) );

	/* Keep the original pagelist until we know all the allocations
	 * have succeeded
	 */
	temp_pagelist = drm_alloc( (dma->page_count + (count << page_order))
				   * sizeof(*dma->pagelist),
				   DRM_MEM_PAGES );
	if (!temp_pagelist) {
		drm_free( entry->buflist,
			  count * sizeof(*entry->buflist),
			  DRM_MEM_BUFS );
		drm_free( entry->seglist,
			  count * sizeof(*entry->seglist),
			  DRM_MEM_SEGS );
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -ENOMEM;
	}
	memcpy(temp_pagelist,
	       dma->pagelist,
	       dma->page_count * sizeof(*dma->pagelist));
	DRM_DEBUG( "pagelist: %d entries\n",
		   dma->page_count + (count << page_order) );

	entry->buf_size = size;
	entry->page_order = page_order;
	byte_count = 0;
	page_count = 0;

	while ( entry->buf_count < count ) {
		page = drm_alloc_pages( page_order, DRM_MEM_DMA );
		if ( !page ) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			entry->seg_count = count;
			drm_cleanup_buf_error(dev, entry);
			drm_free( temp_pagelist,
				  (dma->page_count + (count << page_order))
				  * sizeof(*dma->pagelist),
				  DRM_MEM_PAGES );
			up( &dev->struct_sem );
			atomic_dec( &dev->buf_alloc );
			return -ENOMEM;
		}
		entry->seglist[entry->seg_count++] = page;
		for ( i = 0 ; i < (1 << page_order) ; i++ ) {
			DRM_DEBUG( "page %d @ 0x%08lx\n",
				   dma->page_count + page_count,
				   page + PAGE_SIZE * i );
			temp_pagelist[dma->page_count + page_count++]
				= page + PAGE_SIZE * i;
		}
		for ( offset = 0 ;
		      offset + size <= total && entry->buf_count < count ;
		      offset += alignment, ++entry->buf_count ) {
			buf = &entry->buflist[entry->buf_count];
			buf->idx = dma->buf_count + entry->buf_count;
			buf->total = alignment;
			buf->order = order;
			buf->used = 0;
			buf->offset = (dma->byte_count + byte_count + offset);
			buf->address = (void *)(page + offset);
			buf->next = NULL;
			buf->waiting = 0;
			buf->pending = 0;
			init_waitqueue_head( &buf->dma_wait );
			buf->filp = NULL;

			buf->dev_priv_size = dev->driver->dev_priv_size;
			buf->dev_private = drm_alloc( buf->dev_priv_size,
						      DRM_MEM_BUFS );
			if(!buf->dev_private) {
				/* Set count correctly so we free the proper amount. */
				entry->buf_count = count;
				entry->seg_count = count;
				drm_cleanup_buf_error(dev,entry);
				drm_free( temp_pagelist,
					  (dma->page_count + (count << page_order))
					  * sizeof(*dma->pagelist),
					  DRM_MEM_PAGES );
				up( &dev->struct_sem );
				atomic_dec( &dev->buf_alloc );
				return -ENOMEM;
			}
			memset( buf->dev_private, 0, buf->dev_priv_size );

			DRM_DEBUG( "buffer %d @ %p\n",
				   entry->buf_count, buf->address );
		}
		byte_count += PAGE_SIZE << page_order;
	}

	temp_buflist = drm_realloc( dma->buflist,
				    dma->buf_count * sizeof(*dma->buflist),
				    (dma->buf_count + entry->buf_count)
				    * sizeof(*dma->buflist),
				    DRM_MEM_BUFS );
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev,entry);
		drm_free( temp_pagelist,
			  (dma->page_count + (count << page_order))
			  * sizeof(*dma->pagelist),
			  DRM_MEM_PAGES );
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for ( i = 0 ; i < entry->buf_count ; i++ ) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	/* No allocations failed, so now we can replace the original pagelist
	 * with the new one.
	 */
	if (dma->page_count) {
		drm_free(dma->pagelist,
			 dma->page_count * sizeof(*dma->pagelist),
			 DRM_MEM_PAGES);
	}
	dma->pagelist = temp_pagelist;

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += entry->seg_count << page_order;
	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

	up( &dev->struct_sem );

	request->count = entry->buf_count;
	request->size = size;

	atomic_dec( &dev->buf_alloc );
	return 0;

}
EXPORT_SYMBOL(drm_addbufs_pci);

static int drm_addbufs_sg(drm_device_t *dev, drm_buf_desc_t *request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_SG)) return -EINVAL;

	if ( !dma ) return -EINVAL;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
		? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG( "count: %d\n", count );
	DRM_DEBUG( "order: %d\n", order );
	DRM_DEBUG( "size: %d\n", size );
	DRM_DEBUG( "agp_offset: %lu\n", agp_offset );
	DRM_DEBUG( "alignment: %d\n", alignment );
	DRM_DEBUG( "page_order: %d\n", page_order );
	DRM_DEBUG( "total: %d\n", total );

	if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
	if ( dev->queue_count ) return -EBUSY; /* Not while in use */

	spin_lock( &dev->count_lock );
	if ( dev->buf_use ) {
		spin_unlock( &dev->count_lock );
		return -EBUSY;
	}
	atomic_inc( &dev->buf_alloc );
	spin_unlock( &dev->count_lock );

	down( &dev->struct_sem );
	entry = &dma->bufs[order];
	if ( entry->buf_count ) {
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -ENOMEM; /* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -EINVAL;
	}

	entry->buflist = drm_alloc( count * sizeof(*entry->buflist),
				    DRM_MEM_BUFS );
	if ( !entry->buflist ) {
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -ENOMEM;
	}
	memset( entry->buflist, 0, count * sizeof(*entry->buflist) );

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while ( entry->buf_count < count ) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset + dev->sg->handle);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head( &buf->dma_wait );
		buf->filp = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = drm_alloc( buf->dev_priv_size,
					      DRM_MEM_BUFS );
		if(!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev,entry);
			up( &dev->struct_sem );
			atomic_dec( &dev->buf_alloc );
			return -ENOMEM;
		}

		memset( buf->dev_private, 0, buf->dev_priv_size );

		DRM_DEBUG( "buffer %d @ %p\n",
			   entry->buf_count, buf->address );

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG( "byte_count: %d\n", byte_count );

	temp_buflist = drm_realloc( dma->buflist,
				    dma->buf_count * sizeof(*dma->buflist),
				    (dma->buf_count + entry->buf_count)
				    * sizeof(*dma->buflist),
				    DRM_MEM_BUFS );
	if(!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev,entry);
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for ( i = 0 ; i < entry->buf_count ; i++ ) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
	DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );

	up( &dev->struct_sem );

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_SG;

	atomic_dec( &dev->buf_alloc );
	return 0;
}

int drm_addbufs_fb(drm_device_t *dev, drm_buf_desc_t *request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
		? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG("count: %d\n", count);
	DRM_DEBUG("order: %d\n", order);
	DRM_DEBUG("size: %d\n", size);
	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
	DRM_DEBUG("alignment: %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total: %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	down(&dev->struct_sem);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
				   DRM_MEM_BUFS);
	if (!entry->buflist) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->buflist, 0, count * sizeof(*entry->buflist));

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head(&buf->dma_wait);
		buf->filp = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			up(&dev->struct_sem);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}
		memset(buf->dev_private, 0, buf->dev_priv_size);

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = drm_realloc(dma->buflist,
				   dma->buf_count * sizeof(*dma->buflist),
				   (dma->buf_count + entry->buf_count)
				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	up(&dev->struct_sem);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_FB;

	atomic_dec(&dev->buf_alloc);
	return 0;
}

/**
 * Add buffers for DMA transfers (ioctl).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_desc_t request.
 * \return zero on success or a negative number on failure.
 *
 * According to the memory type specified in drm_buf_desc::flags and the
 * build options, it dispatches the call to drm_addbufs_agp(),
 * drm_addbufs_sg(), drm_addbufs_fb() or drm_addbufs_pci() for AGP,
 * scatter-gather, framebuffer or consistent PCI memory respectively.
 */
int drm_addbufs( struct inode *inode, struct file *filp,
		 unsigned int cmd, unsigned long arg )
{
	drm_buf_desc_t request;
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if ( copy_from_user( &request, (drm_buf_desc_t __user *)arg,
			     sizeof(request) ) )
		return -EFAULT;

#if __OS_HAS_AGP
	if ( request.flags & _DRM_AGP_BUFFER )
		ret = drm_addbufs_agp(dev, &request);
	else
#endif
	if ( request.flags & _DRM_SG_BUFFER )
		ret = drm_addbufs_sg(dev, &request);
	else if ( request.flags & _DRM_FB_BUFFER )
		ret = drm_addbufs_fb(dev, &request);
	else
		ret = drm_addbufs_pci(dev, &request);

	if (ret == 0) {
		if (copy_to_user((void __user *)arg, &request,
				 sizeof(request))) {
			ret = -EFAULT;
		}
	}
	return ret;
}


/**
 * Get information about the buffer mappings.
 *
 * This was originally meant for debugging purposes, or for use by a
 * sophisticated client library to determine how best to use the available
 * buffers (e.g., large buffers can be used for image transfer).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_info structure.
 * \return zero on success or a negative number on failure.
 *
 * Increments drm_device::buf_use while holding the drm_device::count_lock
 * lock, preventing allocation of more buffers after this call. Information
 * about each requested buffer is then copied into user space.
 */
int drm_infobufs( struct inode *inode, struct file *filp,
		  unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_info_t request;
	drm_buf_info_t __user *argp = (void __user *)arg;
	int i;
	int count;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if ( !dma ) return -EINVAL;

	spin_lock( &dev->count_lock );
	if ( atomic_read( &dev->buf_alloc ) ) {
		spin_unlock( &dev->count_lock );
		return -EBUSY;
	}
	++dev->buf_use;		/* Can't allocate more after this call */
	spin_unlock( &dev->count_lock );

	if ( copy_from_user( &request, argp, sizeof(request) ) )
		return -EFAULT;

	for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
		if ( dma->bufs[i].buf_count ) ++count;
	}

	DRM_DEBUG( "count = %d\n", count );

	if ( request.count >= count ) {
		for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
			if ( dma->bufs[i].buf_count ) {
				drm_buf_desc_t __user *to = &request.list[count];
				drm_buf_entry_t *from = &dma->bufs[i];
				drm_freelist_t *list = &dma->bufs[i].freelist;
				if ( copy_to_user( &to->count,
						   &from->buf_count,
						   sizeof(from->buf_count) ) ||
				     copy_to_user( &to->size,
						   &from->buf_size,
						   sizeof(from->buf_size) ) ||
				     copy_to_user( &to->low_mark,
						   &list->low_mark,
						   sizeof(list->low_mark) ) ||
				     copy_to_user( &to->high_mark,
						   &list->high_mark,
						   sizeof(list->high_mark) ) )
					return -EFAULT;

				DRM_DEBUG( "%d %d %d %d %d\n",
					   i,
					   dma->bufs[i].buf_count,
					   dma->bufs[i].buf_size,
					   dma->bufs[i].freelist.low_mark,
					   dma->bufs[i].freelist.high_mark );
				++count;
			}
		}
	}
	request.count = count;

	if ( copy_to_user( argp, &request, sizeof(request) ) )
		return -EFAULT;

	return 0;
}
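
/*
 * A client typically drives this ioctl in two passes (illustrative): first
 * with request.count == 0 to learn how many size pools are populated (the
 * copy-out loop is skipped and only the pool count comes back in
 * request.count), then again with request.list pointing at an array of that
 * many drm_buf_desc_t entries to receive the per-pool details.
 */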

/**
 * Specifies a low and high water mark for buffer allocation
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg a pointer to a drm_buf_desc structure.
 * \return zero on success or a negative number on failure.
 *
 * Verifies that the size order is bounded between the admissible orders and
 * updates the respective drm_device_dma::bufs entry low and high water mark.
 *
 * \note This ioctl is deprecated and mostly never used.
 */
int drm_markbufs( struct inode *inode, struct file *filp,
		  unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_desc_t request;
	int order;
	drm_buf_entry_t *entry;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if ( !dma ) return -EINVAL;

	if ( copy_from_user( &request,
			     (drm_buf_desc_t __user *)arg,
			     sizeof(request) ) )
		return -EFAULT;

	DRM_DEBUG( "%d, %d, %d\n",
		   request.size, request.low_mark, request.high_mark );
	order = drm_order( request.size );
	if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
	entry = &dma->bufs[order];

	if ( request.low_mark < 0 || request.low_mark > entry->buf_count )
		return -EINVAL;
	if ( request.high_mark < 0 || request.high_mark > entry->buf_count )
		return -EINVAL;

	entry->freelist.low_mark = request.low_mark;
	entry->freelist.high_mark = request.high_mark;

	return 0;
}

/**
 * Unreserve the buffers in list, previously reserved using drmDMA.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_free structure.
 * \return zero on success or a negative number on failure.
 *
 * Calls free_buffer() for each used buffer.
 * This function is primarily used for debugging.
 */
int drm_freebufs( struct inode *inode, struct file *filp,
		  unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_free_t request;
	int i;
	int idx;
	drm_buf_t *buf;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if ( !dma ) return -EINVAL;

	if ( copy_from_user( &request,
			     (drm_buf_free_t __user *)arg,
			     sizeof(request) ) )
		return -EFAULT;

	DRM_DEBUG( "%d\n", request.count );
	for ( i = 0 ; i < request.count ; i++ ) {
		if ( copy_from_user( &idx,
				     &request.list[i],
				     sizeof(idx) ) )
			return -EFAULT;
		if ( idx < 0 || idx >= dma->buf_count ) {
			DRM_ERROR( "Index %d (of %d max)\n",
				   idx, dma->buf_count - 1 );
			return -EINVAL;
		}
		buf = dma->buflist[idx];
		if ( buf->filp != filp ) {
			DRM_ERROR( "Process %d freeing buffer not owned\n",
				   current->pid );
			return -EINVAL;
		}
		drm_free_buffer( dev, buf );
	}

	return 0;
}

/**
 * Maps all of the DMA buffers into client-virtual space (ioctl).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_map structure.
 * \return zero on success or a negative number on failure.
 *
 * Maps the AGP, SG or FB buffer region with do_mmap(), and copies information
 * about each buffer into user space. The PCI buffers are already mapped on
 * the addbufs_pci() call.
 */
int drm_mapbufs( struct inode *inode, struct file *filp,
		 unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_map_t __user *argp = (void __user *)arg;
	int retcode = 0;
	const int zero = 0;
	unsigned long virtual;
	unsigned long address;
	drm_buf_map_t request;
	int i;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if ( !dma ) return -EINVAL;

	spin_lock( &dev->count_lock );
	if ( atomic_read( &dev->buf_alloc ) ) {
		spin_unlock( &dev->count_lock );
		return -EBUSY;
	}
	dev->buf_use++;		/* Can't allocate more after this call */
	spin_unlock( &dev->count_lock );

	if ( copy_from_user( &request, argp, sizeof(request) ) )
		return -EFAULT;

	if ( request.count >= dma->buf_count ) {
		if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
		    || (drm_core_check_feature(dev, DRIVER_SG)
			&& (dma->flags & _DRM_DMA_USE_SG))
		    || (drm_core_check_feature(dev, DRIVER_FB_DMA)
			&& (dma->flags & _DRM_DMA_USE_FB))) {
			drm_map_t *map = dev->agp_buffer_map;

			if ( !map ) {
				retcode = -EINVAL;
				goto done;
			}

#if LINUX_VERSION_CODE <= 0x020402
			down( &current->mm->mmap_sem );
#else
			down_write( &current->mm->mmap_sem );
#endif
			virtual = do_mmap( filp, 0, map->size,
					   PROT_READ | PROT_WRITE,
					   MAP_SHARED,
					   (unsigned long)map->offset );
#if LINUX_VERSION_CODE <= 0x020402
			up( &current->mm->mmap_sem );
#else
			up_write( &current->mm->mmap_sem );
#endif
		} else {
#if LINUX_VERSION_CODE <= 0x020402
			down( &current->mm->mmap_sem );
#else
			down_write( &current->mm->mmap_sem );
#endif
			virtual = do_mmap( filp, 0, dma->byte_count,
					   PROT_READ | PROT_WRITE,
					   MAP_SHARED, 0 );
#if LINUX_VERSION_CODE <= 0x020402
			up( &current->mm->mmap_sem );
#else
			up_write( &current->mm->mmap_sem );
#endif
		}
		if ( virtual > -1024UL ) {
			/* Real error */
			retcode = (signed long)virtual;
			goto done;
		}
		request.virtual = (void __user *)virtual;

		for ( i = 0 ; i < dma->buf_count ; i++ ) {
			if ( copy_to_user( &request.list[i].idx,
					   &dma->buflist[i]->idx,
					   sizeof(request.list[0].idx) ) ) {
				retcode = -EFAULT;
				goto done;
			}
			if ( copy_to_user( &request.list[i].total,
					   &dma->buflist[i]->total,
					   sizeof(request.list[0].total) ) ) {
				retcode = -EFAULT;
				goto done;
			}
			if ( copy_to_user( &request.list[i].used,
					   &zero,
					   sizeof(zero) ) ) {
				retcode = -EFAULT;
				goto done;
			}
			address = virtual + dma->buflist[i]->offset; /* *** */
			if ( copy_to_user( &request.list[i].address,
					   &address,
					   sizeof(address) ) ) {
				retcode = -EFAULT;
				goto done;
			}
		}
	}
 done:
	request.count = dma->buf_count;
	DRM_DEBUG( "%d buffers, retcode = %d\n", request.count, retcode );

	if ( copy_to_user( argp, &request, sizeof(request) ) )
		return -EFAULT;

	return retcode;
}
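
/*
 * Userspace side, for reference (illustrative sketch, assuming the
 * drm_buf_map_t/drm_buf_pub_t layout from drm.h; COUNT is a hypothetical
 * value learned from an earlier buffer-info call):
 *
 *	drm_buf_map_t req;
 *	drm_buf_pub_t list[COUNT];
 *	req.count = COUNT;
 *	req.virtual = NULL;
 *	req.list = list;
 *	if (ioctl(fd, DRM_IOCTL_MAP_BUFS, &req) == 0) {
 *		// list[i].address is now buffer i, mapped into this process
 *	}
 */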

/**
 * Compute size order. Returns the exponent of the smallest power of two
 * which is greater than or equal to the given number.
 *
 * \param size size.
 * \return order.
 *
 * \todo Can be made faster.
 */
int drm_order( unsigned long size )
{
	int order;
	unsigned long tmp;

	for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++)
		;

	if (size & (size - 1))
		++order;

	return order;
}
EXPORT_SYMBOL(drm_order);
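
/*
 * Worked examples, assuming PAGE_SHIFT == 12: drm_order(4096) == 12,
 * drm_order(4097) == 13, drm_order(1) == 0. Requested buffer sizes are
 * rounded up to the next power of two this way before indexing
 * drm_device_dma::bufs[].
 */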