/* drm_drv.h -- Generic driver template -*- linux-c -*-
 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *    Gareth Hughes <gareth@valinux.com>
 */

/*
 * To use this template, you must at least define the following (samples
 * given for the MGA driver):
 *
 * #define DRIVER_AUTHOR        "VA Linux Systems, Inc."
 *
 * #define DRIVER_NAME          "mga"
 * #define DRIVER_DESC          "Matrox G200/G400"
 * #define DRIVER_DATE          "20001127"
 *
 * #define DRIVER_MAJOR         2
 * #define DRIVER_MINOR         0
 * #define DRIVER_PATCHLEVEL    2
 *
 * #define DRIVER_IOCTL_COUNT   DRM_ARRAY_SIZE( mga_ioctls )
 *
 * #define DRM(x)               mga_##x
 */
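
/* A driver can also switch on optional template features before including
 * this file; any feature flag left undefined defaults to 0 in the #ifndef
 * blocks below.  A minimal sketch only -- which flags make sense depends
 * entirely on the hardware, and the values here are illustrative:
 *
 * #define __HAVE_CTX_BITMAP    1
 * #define __HAVE_DMA           1
 * #define __HAVE_DMA_IRQ       1
 * #define __HAVE_SG            1
 * #define __MUST_HAVE_AGP      0
 */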

#ifndef __MUST_HAVE_AGP
#define __MUST_HAVE_AGP			0
#endif
#ifndef __HAVE_CTX_BITMAP
#define __HAVE_CTX_BITMAP		0
#endif
#ifndef __HAVE_DMA_IRQ
#define __HAVE_DMA_IRQ			0
#endif
#ifndef __HAVE_DMA_QUEUE
#define __HAVE_DMA_QUEUE		0
#endif
#ifndef __HAVE_MULTIPLE_DMA_QUEUES
#define __HAVE_MULTIPLE_DMA_QUEUES	0
#endif
#ifndef __HAVE_DMA_SCHEDULE
#define __HAVE_DMA_SCHEDULE		0
#endif
#ifndef __HAVE_DMA_FLUSH
#define __HAVE_DMA_FLUSH		0
#endif
#ifndef __HAVE_DMA_READY
#define __HAVE_DMA_READY		0
#endif
#ifndef __HAVE_DMA_QUIESCENT
#define __HAVE_DMA_QUIESCENT		0
#endif
#ifndef __HAVE_RELEASE
#define __HAVE_RELEASE			0
#endif
#ifndef __HAVE_COUNTERS
#define __HAVE_COUNTERS			0
#endif
#ifndef __HAVE_SG
#define __HAVE_SG			0
#endif
#ifndef __HAVE_KERNEL_CTX_SWITCH
#define __HAVE_KERNEL_CTX_SWITCH	0
#endif

#ifndef DRIVER_PREINIT
#define DRIVER_PREINIT()
#endif
#ifndef DRIVER_POSTINIT
#define DRIVER_POSTINIT()
#endif
#ifndef DRIVER_PRERELEASE
#define DRIVER_PRERELEASE()
#endif
#ifndef DRIVER_PRETAKEDOWN
#define DRIVER_PRETAKEDOWN()
#endif
#ifndef DRIVER_POSTCLEANUP
#define DRIVER_POSTCLEANUP()
#endif
#ifndef DRIVER_PRESETUP
#define DRIVER_PRESETUP()
#endif
#ifndef DRIVER_POSTSETUP
#define DRIVER_POSTSETUP()
#endif
#ifndef DRIVER_IOCTLS
#define DRIVER_IOCTLS
#endif
#ifndef DRIVER_FOPS
#define DRIVER_FOPS					\
static struct file_operations DRM(fops) = {		\
        .owner   = THIS_MODULE,				\
        .open    = DRM(open),				\
        .flush   = DRM(flush),				\
        .release = DRM(release),			\
        .ioctl   = DRM(ioctl),				\
        .mmap    = DRM(mmap),				\
        .read    = DRM(read),				\
        .fasync  = DRM(fasync),				\
        .poll    = DRM(poll),				\
}
#endif

#ifndef MODULE
/* DRM(options) is called by the kernel to parse command-line options
 * passed via the boot-loader (e.g., LILO).  It calls the insmod option
 * routine, drm_parse_drm.
 */
/* Use an additional macro to avoid preprocessor troubles */
#define DRM_OPTIONS_FUNC DRM(options)
static int __init DRM(options)( char *str )
{
        DRM(parse_options)( str );
        return 1;
}

__setup( DRIVER_NAME "=", DRM_OPTIONS_FUNC );
#undef DRM_OPTIONS_FUNC
#endif
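
/* When the driver is built into the kernel, the __setup() line above lets
 * the option string be supplied on the kernel command line; it is handed
 * to DRM(parse_options) unmodified.  A sketch, assuming DRIVER_NAME is
 * "mga" as in the sample above (the option syntax itself is whatever
 * DRM(parse_options) accepts):
 *
 *     LILO:  append = "mga=<option-string>"
 */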

/*
 * The default number of instances (minor numbers) to initialize.
 */
#ifndef DRIVER_NUM_CARDS
#define DRIVER_NUM_CARDS 1
#endif

static drm_device_t	*DRM(device);
static int		*DRM(minor);
static int		DRM(numdevs) = 0;

DRIVER_FOPS;

static drm_ioctl_desc_t DRM(ioctls)[] = {
        [DRM_IOCTL_NR(DRM_IOCTL_VERSION)]       = { DRM(version),     0, 0 },
        [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)]    = { DRM(getunique),   0, 0 },
        [DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)]     = { DRM(getmagic),    0, 0 },
        [DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)]     = { DRM(irq_busid),   0, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP)]       = { DRM(getmap),      0, 0 },
        [DRM_IOCTL_NR(DRM_IOCTL_GET_CLIENT)]    = { DRM(getclient),   0, 0 },
        [DRM_IOCTL_NR(DRM_IOCTL_GET_STATS)]     = { DRM(getstats),    0, 0 },

        [DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)]    = { DRM(setunique),   1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_BLOCK)]         = { DRM(block),       1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)]       = { DRM(unblock),     1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)]    = { DRM(authmagic),   1, 1 },

        [DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)]       = { DRM(addmap),      1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_RM_MAP)]        = { DRM(rmmap),       1, 0 },

#if __HAVE_CTX_BITMAP
        [DRM_IOCTL_NR(DRM_IOCTL_SET_SAREA_CTX)] = { DRM(setsareactx), 1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_GET_SAREA_CTX)] = { DRM(getsareactx), 1, 0 },
#endif

        [DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)]       = { DRM(addctx),      1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)]        = { DRM(rmctx),       1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)]       = { DRM(modctx),      1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)]       = { DRM(getctx),      1, 0 },
        [DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)]    = { DRM(switchctx),   1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)]       = { DRM(newctx),      1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)]       = { DRM(resctx),      1, 0 },

        [DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)]      = { DRM(adddraw),     1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)]       = { DRM(rmdraw),      1, 1 },

        [DRM_IOCTL_NR(DRM_IOCTL_LOCK)]          = { DRM(lock),        1, 0 },
        [DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)]        = { DRM(unlock),      1, 0 },
        [DRM_IOCTL_NR(DRM_IOCTL_FINISH)]        = { DRM(finish),      1, 0 },

#if __HAVE_DMA
        [DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)]      = { DRM(addbufs),     1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)]     = { DRM(markbufs),    1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)]     = { DRM(infobufs),    1, 0 },
        [DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)]      = { DRM(mapbufs),     1, 0 },
        [DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)]     = { DRM(freebufs),    1, 0 },

        /* The DRM_IOCTL_DMA ioctl should be defined by the driver.
         */
        [DRM_IOCTL_NR(DRM_IOCTL_CONTROL)]       = { DRM(control),     1, 1 },
#endif

#if __REALLY_HAVE_AGP
        [DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)]   = { DRM(agp_acquire), 1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)]   = { DRM(agp_release), 1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)]    = { DRM(agp_enable),  1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)]      = { DRM(agp_info),    1, 0 },
        [DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)]     = { DRM(agp_alloc),   1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)]      = { DRM(agp_free),    1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)]      = { DRM(agp_bind),    1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)]    = { DRM(agp_unbind),  1, 1 },
#endif

#if __HAVE_SG
        [DRM_IOCTL_NR(DRM_IOCTL_SG_ALLOC)]      = { DRM(sg_alloc),    1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_SG_FREE)]       = { DRM(sg_free),     1, 1 },
#endif

        DRIVER_IOCTLS
};

#define DRIVER_IOCTL_COUNT	DRM_ARRAY_SIZE( DRM(ioctls) )
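
/* DRIVER_IOCTLS is how a driver appends its device-specific entries to the
 * table above.  Each drm_ioctl_desc_t initializer has the form
 * { handler, auth_needed, root_only }, matching the checks in DRM(ioctl)
 * below.  A hypothetical sketch (the ioctl names and handlers here are
 * illustrative only, not real ones):
 *
 * #define DRIVER_IOCTLS						\
 *     [DRM_IOCTL_NR(DRM_IOCTL_FOO_INIT)]  = { foo_dma_init,  1, 1 },	\
 *     [DRM_IOCTL_NR(DRM_IOCTL_FOO_FLUSH)] = { foo_dma_flush, 1, 0 },
 */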

#ifdef MODULE
static char *drm_opts = NULL;
#endif

MODULE_AUTHOR( DRIVER_AUTHOR );
MODULE_DESCRIPTION( DRIVER_DESC );
MODULE_PARM( drm_opts, "s" );
MODULE_LICENSE("GPL and additional rights");

static int DRM(setup)( drm_device_t *dev )
{
        int i;

        DRIVER_PRESETUP();
        atomic_set( &dev->ioctl_count, 0 );
        atomic_set( &dev->vma_count, 0 );
        dev->buf_use = 0;
        atomic_set( &dev->buf_alloc, 0 );

#if __HAVE_DMA
        i = DRM(dma_setup)( dev );
        if ( i < 0 )
                return i;
#endif

        dev->counters  = 6 + __HAVE_COUNTERS;
        dev->types[0]  = _DRM_STAT_LOCK;
        dev->types[1]  = _DRM_STAT_OPENS;
        dev->types[2]  = _DRM_STAT_CLOSES;
        dev->types[3]  = _DRM_STAT_IOCTLS;
        dev->types[4]  = _DRM_STAT_LOCKS;
        dev->types[5]  = _DRM_STAT_UNLOCKS;
#ifdef __HAVE_COUNTER6
        dev->types[6]  = __HAVE_COUNTER6;
#endif
#ifdef __HAVE_COUNTER7
        dev->types[7]  = __HAVE_COUNTER7;
#endif
#ifdef __HAVE_COUNTER8
        dev->types[8]  = __HAVE_COUNTER8;
#endif
#ifdef __HAVE_COUNTER9
        dev->types[9]  = __HAVE_COUNTER9;
#endif
#ifdef __HAVE_COUNTER10
        dev->types[10] = __HAVE_COUNTER10;
#endif
#ifdef __HAVE_COUNTER11
        dev->types[11] = __HAVE_COUNTER11;
#endif
#ifdef __HAVE_COUNTER12
        dev->types[12] = __HAVE_COUNTER12;
#endif
#ifdef __HAVE_COUNTER13
        dev->types[13] = __HAVE_COUNTER13;
#endif
#ifdef __HAVE_COUNTER14
        dev->types[14] = __HAVE_COUNTER14;
#endif
#ifdef __HAVE_COUNTER15
        dev->types[15] = __HAVE_COUNTER15;
#endif

        for ( i = 0 ; i < DRM_ARRAY_SIZE(dev->counts) ; i++ )
                atomic_set( &dev->counts[i], 0 );

        for ( i = 0 ; i < DRM_HASH_SIZE ; i++ ) {
                dev->magiclist[i].head = NULL;
                dev->magiclist[i].tail = NULL;
        }

        dev->maplist = DRM(alloc)(sizeof(*dev->maplist),
                                  DRM_MEM_MAPS);
        if(dev->maplist == NULL) return -ENOMEM;
        memset(dev->maplist, 0, sizeof(*dev->maplist));
        INIT_LIST_HEAD(&dev->maplist->head);
        dev->map_count = 0;

        dev->vmalist = NULL;
        dev->sigdata.lock = dev->lock.hw_lock = NULL;
        init_waitqueue_head( &dev->lock.lock_queue );
        dev->queue_count = 0;
        dev->queue_reserved = 0;
        dev->queue_slots = 0;
        dev->queuelist = NULL;
        dev->irq = 0;
        dev->context_flag = 0;
        dev->interrupt_flag = 0;
        dev->dma_flag = 0;
        dev->last_context = 0;
        dev->last_switch = 0;
        dev->last_checked = 0;
        init_timer( &dev->timer );
        init_waitqueue_head( &dev->context_wait );

        dev->ctx_start = 0;
        dev->lck_start = 0;

        dev->buf_rp = dev->buf;
        dev->buf_wp = dev->buf;
        dev->buf_end = dev->buf + DRM_BSZ;
        dev->buf_async = NULL;
        init_waitqueue_head( &dev->buf_readers );
        init_waitqueue_head( &dev->buf_writers );

        DRM_DEBUG( "\n" );

        /* The kernel's context could be created here, but is now created
         * in drm_dma_enqueue.  This is more resource-efficient for
         * hardware that does not do DMA, but may mean that
         * drm_select_queue fails between the time the interrupt is
         * initialized and the time the queues are initialized.
         */
        DRIVER_POSTSETUP();
        return 0;
}


static int DRM(takedown)( drm_device_t *dev )
{
        drm_magic_entry_t *pt, *next;
        drm_map_t *map;
        drm_map_list_t *r_list;
        struct list_head *list, *list_next;
        drm_vma_entry_t *vma, *vma_next;
        int i;

        DRM_DEBUG( "\n" );

        DRIVER_PRETAKEDOWN();
#if __HAVE_DMA_IRQ
        if ( dev->irq ) DRM(irq_uninstall)( dev );
#endif

        down( &dev->struct_sem );
        del_timer( &dev->timer );

        if ( dev->devname ) {
                DRM(free)( dev->devname, strlen( dev->devname ) + 1,
                           DRM_MEM_DRIVER );
                dev->devname = NULL;
        }

        if ( dev->unique ) {
                DRM(free)( dev->unique, strlen( dev->unique ) + 1,
                           DRM_MEM_DRIVER );
                dev->unique = NULL;
                dev->unique_len = 0;
        }
        /* Clear pid list */
        for ( i = 0 ; i < DRM_HASH_SIZE ; i++ ) {
                for ( pt = dev->magiclist[i].head ; pt ; pt = next ) {
                        next = pt->next;
                        DRM(free)( pt, sizeof(*pt), DRM_MEM_MAGIC );
                }
                dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
        }

#if __REALLY_HAVE_AGP
        /* Clear AGP information */
        if ( dev->agp ) {
                drm_agp_mem_t *entry;
                drm_agp_mem_t *nexte;

                /* Remove AGP resources, but leave dev->agp
                   intact until drv_cleanup is called. */
                for ( entry = dev->agp->memory ; entry ; entry = nexte ) {
                        nexte = entry->next;
                        if ( entry->bound ) DRM(unbind_agp)( entry->memory );
                        DRM(free_agp)( entry->memory, entry->pages );
                        DRM(free)( entry, sizeof(*entry), DRM_MEM_AGPLISTS );
                }
                dev->agp->memory = NULL;

                if ( dev->agp->acquired ) DRM(agp_do_release)();

                dev->agp->acquired = 0;
                dev->agp->enabled  = 0;
        }
#endif

        /* Clear vma list (only built for debugging) */
        if ( dev->vmalist ) {
                for ( vma = dev->vmalist ; vma ; vma = vma_next ) {
                        vma_next = vma->next;
                        DRM(free)( vma, sizeof(*vma), DRM_MEM_VMAS );
                }
                dev->vmalist = NULL;
        }

        if( dev->maplist ) {
                for(list = dev->maplist->head.next;
                    list != &dev->maplist->head;
                    list = list_next) {
                        list_next = list->next;
                        r_list = (drm_map_list_t *)list;
                        map = r_list->map;
                        DRM(free)(r_list, sizeof(*r_list), DRM_MEM_MAPS);
                        if(!map) continue;

                        switch ( map->type ) {
                        case _DRM_REGISTERS:
                        case _DRM_FRAME_BUFFER:
#if __REALLY_HAVE_MTRR
                                if ( map->mtrr >= 0 ) {
                                        int retcode;
                                        retcode = mtrr_del( map->mtrr,
                                                            map->offset,
                                                            map->size );
                                        DRM_DEBUG( "mtrr_del=%d\n", retcode );
                                }
#endif
                                DRM(ioremapfree)( map->handle, map->size );
                                break;
                        case _DRM_SHM:
                                vfree(map->handle);
                                break;

                        case _DRM_AGP:
                                /* Do nothing here, because this is all
                                 * handled in the AGP/GART driver.
                                 */
                                break;
                        case _DRM_SCATTER_GATHER:
                                /* Handle it, but do nothing, if HAVE_SG
                                 * isn't defined.
                                 */
#if __HAVE_SG
                                if(dev->sg) {
                                        DRM(sg_cleanup)(dev->sg);
                                        dev->sg = NULL;
                                }
#endif
                                break;
                        }
                        DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
                }
                DRM(free)(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);
                dev->maplist = NULL;
        }

#if __HAVE_DMA_QUEUE || __HAVE_MULTIPLE_DMA_QUEUES
        if ( dev->queuelist ) {
                for ( i = 0 ; i < dev->queue_count ; i++ ) {
                        if ( dev->queuelist[i] ) {
                                /* Only destroy the waitlist for slots that
                                 * were actually allocated.
                                 */
                                DRM(waitlist_destroy)( &dev->queuelist[i]->waitlist );
                                DRM(free)( dev->queuelist[i],
                                           sizeof(*dev->queuelist[0]),
                                           DRM_MEM_QUEUES );
                                dev->queuelist[i] = NULL;
                        }
                }
                DRM(free)( dev->queuelist,
                           dev->queue_slots * sizeof(*dev->queuelist),
                           DRM_MEM_QUEUES );
                dev->queuelist = NULL;
        }
        dev->queue_count = 0;
#endif

#if __HAVE_DMA
        DRM(dma_takedown)( dev );
#endif
        if ( dev->lock.hw_lock ) {
                dev->sigdata.lock = dev->lock.hw_lock = NULL; /* SHM removed */
                dev->lock.pid = 0;
                wake_up_interruptible( &dev->lock.lock_queue );
        }
        up( &dev->struct_sem );

        return 0;
}

/*
 * Figure out how many instances to initialize.
 */
static int drm_count_cards(void)
{
        int num = 0;
#if defined(DRIVER_CARD_LIST)
        int i;
        drm_pci_list_t *l;
        u16 device, vendor;
        struct pci_dev *pdev = NULL;
#endif

        DRM_DEBUG( "\n" );

#if defined(DRIVER_COUNT_CARDS)
        num = DRIVER_COUNT_CARDS();
#elif defined(DRIVER_CARD_LIST)
        for (i = 0, l = DRIVER_CARD_LIST; l[i].vendor != 0; i++) {
                pdev = NULL;
                vendor = l[i].vendor;
                device = l[i].device;
                if(device == 0xffff) device = PCI_ANY_ID;
                if(vendor == 0xffff) vendor = PCI_ANY_ID;
                while ((pdev = pci_find_device(vendor, device, pdev))) {
                        num++;
                }
        }
#else
        num = DRIVER_NUM_CARDS;
#endif
        DRM_DEBUG("numdevs = %d\n", num);
        return num;
}
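
/* When DRIVER_CARD_LIST is defined, drm_count_cards() above walks a
 * { vendor, device } table and counts the matching PCI devices; 0xffff in
 * either field acts as a wildcard and a zero vendor terminates the list.
 * A hypothetical sketch of such a table (the IDs below are placeholders):
 *
 * static drm_pci_list_t DRM(idlist)[] = {
 *         { 0x102b, 0x0525 },
 *         { 0, 0 }
 * };
 * #define DRIVER_CARD_LIST DRM(idlist)
 */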

/* drm_init is called via init_module at module load time, or via
 * linux/init/main.c (this is not currently supported).
 */
static int __init drm_init( void )
{

        drm_device_t *dev;
        int i;
#if __HAVE_CTX_BITMAP
        int retcode;
#endif
        DRM_DEBUG( "\n" );

#ifdef MODULE
        DRM(parse_options)( drm_opts );
#endif

        DRM(numdevs) = drm_count_cards();
        /* Force at least one instance. */
        if (DRM(numdevs) <= 0)
                DRM(numdevs) = 1;

        DRM(device) = kmalloc(sizeof(*DRM(device)) * DRM(numdevs), GFP_KERNEL);
        if (!DRM(device)) {
                return -ENOMEM;
        }
        DRM(minor) = kmalloc(sizeof(*DRM(minor)) * DRM(numdevs), GFP_KERNEL);
        if (!DRM(minor)) {
                kfree(DRM(device));
                return -ENOMEM;
        }

        DRIVER_PREINIT();

        DRM(mem_init)();

        for (i = 0; i < DRM(numdevs); i++) {
                dev = &(DRM(device)[i]);
                memset( (void *)dev, 0, sizeof(*dev) );
                dev->count_lock = SPIN_LOCK_UNLOCKED;
                sema_init( &dev->struct_sem, 1 );

                if ((DRM(minor)[i] = DRM(stub_register)(DRIVER_NAME, &DRM(fops), dev)) < 0)
                        return -EPERM;
                dev->device = MKDEV(DRM_MAJOR, DRM(minor)[i] );
                dev->name   = DRIVER_NAME;

#if __REALLY_HAVE_AGP
                dev->agp = DRM(agp_init)();
#if __MUST_HAVE_AGP
                if ( dev->agp == NULL ) {
                        DRM_ERROR( "Cannot initialize the agpgart module.\n" );
                        DRM(stub_unregister)(DRM(minor)[i]);
                        DRM(takedown)( dev );
                        return -ENOMEM;
                }
#endif
#if __REALLY_HAVE_MTRR
                if (dev->agp)
                        dev->agp->agp_mtrr = mtrr_add( dev->agp->agp_info.aper_base,
                                                       dev->agp->agp_info.aper_size*1024*1024,
                                                       MTRR_TYPE_WRCOMB,
                                                       1 );
#endif
#endif

#if __HAVE_CTX_BITMAP
                retcode = DRM(ctxbitmap_init)( dev );
                if( retcode ) {
                        DRM_ERROR( "Cannot allocate memory for context bitmap.\n" );
                        DRM(stub_unregister)(DRM(minor)[i]);
                        DRM(takedown)( dev );
                        return retcode;
                }
#endif
                DRM_INFO( "Initialized %s %d.%d.%d %s on minor %d\n",
                          DRIVER_NAME,
                          DRIVER_MAJOR,
                          DRIVER_MINOR,
                          DRIVER_PATCHLEVEL,
                          DRIVER_DATE,
                          DRM(minor)[i] );
        }

        DRIVER_POSTINIT();

        return 0;
}

/* drm_cleanup is called via cleanup_module at module unload time.
 */
static void __exit drm_cleanup( void )
{
        drm_device_t *dev;
        int i;

        DRM_DEBUG( "\n" );

        for (i = DRM(numdevs) - 1; i >= 0; i--) {
                dev = &(DRM(device)[i]);
                if ( DRM(stub_unregister)(DRM(minor)[i]) ) {
                        DRM_ERROR( "Cannot unload module\n" );
                } else {
                        DRM_DEBUG("minor %d unregistered\n", DRM(minor)[i]);
                        if (i == 0) {
                                DRM_INFO( "Module unloaded\n" );
                        }
                }
#if __HAVE_CTX_BITMAP
                DRM(ctxbitmap_cleanup)( dev );
#endif

#if __REALLY_HAVE_AGP && __REALLY_HAVE_MTRR
                if ( dev->agp && dev->agp->agp_mtrr >= 0) {
                        int retval;
                        retval = mtrr_del( dev->agp->agp_mtrr,
                                           dev->agp->agp_info.aper_base,
                                           dev->agp->agp_info.aper_size*1024*1024 );
                        DRM_DEBUG( "mtrr_del=%d\n", retval );
                }
#endif

                DRM(takedown)( dev );

#if __REALLY_HAVE_AGP
                if ( dev->agp ) {
                        DRM(agp_uninit)();
                        DRM(free)( dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS );
                        dev->agp = NULL;
                }
#endif
        }
        DRIVER_POSTCLEANUP();
        kfree(DRM(minor));
        kfree(DRM(device));
        DRM(numdevs) = 0;
}

module_init( drm_init );
module_exit( drm_cleanup );


int DRM(version)( struct inode *inode, struct file *filp,
                  unsigned int cmd, unsigned long arg )
{
        drm_version_t version;
        int len;

        if ( copy_from_user( &version,
                             (drm_version_t *)arg,
                             sizeof(version) ) )
                return -EFAULT;

#define DRM_COPY( name, value )					\
        len = strlen( value );					\
        if ( len > name##_len ) len = name##_len;		\
        name##_len = strlen( value );				\
        if ( len && name ) {					\
                if ( copy_to_user( name, value, len ) )		\
                        return -EFAULT;				\
        }

        version.version_major = DRIVER_MAJOR;
        version.version_minor = DRIVER_MINOR;
        version.version_patchlevel = DRIVER_PATCHLEVEL;

        DRM_COPY( version.name, DRIVER_NAME );
        DRM_COPY( version.date, DRIVER_DATE );
        DRM_COPY( version.desc, DRIVER_DESC );

        if ( copy_to_user( (drm_version_t *)arg,
                           &version,
                           sizeof(version) ) )
                return -EFAULT;
        return 0;
}
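
/* A hypothetical userspace sketch of exercising the version ioctl above
 * (error handling omitted; the device node path depends on how the
 * installation creates its DRM nodes):
 *
 *     #include <fcntl.h>
 *     #include <string.h>
 *     #include <sys/ioctl.h>
 *     #include "drm.h"
 *
 *     drm_version_t v;
 *     char name[64], date[64], desc[64];
 *     int fd = open("/dev/dri/card0", O_RDWR);
 *
 *     memset(&v, 0, sizeof(v));
 *     v.name_len = sizeof(name); v.name = name;
 *     v.date_len = sizeof(date); v.date = date;
 *     v.desc_len = sizeof(desc); v.desc = desc;
 *     ioctl(fd, DRM_IOCTL_VERSION, &v);
 */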

int DRM(open)( struct inode *inode, struct file *filp )
{
        drm_device_t *dev = NULL;
        int retcode = 0;
        int i;

        for (i = 0; i < DRM(numdevs); i++) {
                if (minor(inode->i_rdev) == DRM(minor)[i]) {
                        dev = &(DRM(device)[i]);
                        break;
                }
        }
        if (!dev) {
                return -ENODEV;
        }

        DRM_DEBUG( "open_count = %d\n", dev->open_count );

        retcode = DRM(open_helper)( inode, filp, dev );
        if ( !retcode ) {
                atomic_inc( &dev->counts[_DRM_STAT_OPENS] );
                spin_lock( &dev->count_lock );
                if ( !dev->open_count++ ) {
                        spin_unlock( &dev->count_lock );
                        return DRM(setup)( dev );
                }
                spin_unlock( &dev->count_lock );
        }

        return retcode;
}

int DRM(release)( struct inode *inode, struct file *filp )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev;
        int retcode = 0;

        lock_kernel();
        dev = priv->dev;

        DRM_DEBUG( "open_count = %d\n", dev->open_count );

        DRIVER_PRERELEASE();

        /* ========================================================
         * Begin inline drm_release
         */

        DRM_DEBUG( "pid = %d, device = 0x%x, open_count = %d\n",
                   current->pid, dev->device, dev->open_count );

        if ( dev->lock.hw_lock &&
             _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) &&
             dev->lock.pid == current->pid ) {
                DRM_DEBUG( "Process %d dead, freeing lock for context %d\n",
                           current->pid,
                           _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock) );
#if __HAVE_RELEASE
                DRIVER_RELEASE();
#endif
                DRM(lock_free)( dev, &dev->lock.hw_lock->lock,
                                _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock) );

                /* FIXME: may require heavy-handed reset of
                   hardware at this point, possibly
                   processed via a callback to the X
                   server. */
        }
#if __HAVE_RELEASE
        else if ( dev->lock.hw_lock ) {
                /* The lock is required to reclaim buffers */
                DECLARE_WAITQUEUE( entry, current );
                add_wait_queue( &dev->lock.lock_queue, &entry );
                for (;;) {
                        current->state = TASK_INTERRUPTIBLE;
                        if ( !dev->lock.hw_lock ) {
                                /* Device has been unregistered */
                                retcode = -EINTR;
                                break;
                        }
                        if ( DRM(lock_take)( &dev->lock.hw_lock->lock,
                                             DRM_KERNEL_CONTEXT ) ) {
                                dev->lock.pid       = priv->pid;
                                dev->lock.lock_time = jiffies;
                                atomic_inc( &dev->counts[_DRM_STAT_LOCKS] );
                                break;  /* Got lock */
                        }
                        /* Contention */
#if 0
                        atomic_inc( &dev->total_sleeps );
#endif
                        schedule();
                        if ( signal_pending( current ) ) {
                                retcode = -ERESTARTSYS;
                                break;
                        }
                }
                current->state = TASK_RUNNING;
                remove_wait_queue( &dev->lock.lock_queue, &entry );
                if( !retcode ) {
                        DRIVER_RELEASE();
                        DRM(lock_free)( dev, &dev->lock.hw_lock->lock,
                                        DRM_KERNEL_CONTEXT );
                }
        }
#elif __HAVE_DMA
        DRM(reclaim_buffers)( dev, priv->pid );
#endif

        DRM(fasync)( -1, filp, 0 );

        down( &dev->struct_sem );
        if ( priv->remove_auth_on_close == 1 ) {
                drm_file_t *temp = dev->file_first;
                while ( temp ) {
                        temp->authenticated = 0;
                        temp = temp->next;
                }
        }
        if ( priv->prev ) {
                priv->prev->next = priv->next;
        } else {
                dev->file_first  = priv->next;
        }
        if ( priv->next ) {
                priv->next->prev = priv->prev;
        } else {
                dev->file_last   = priv->prev;
        }
        up( &dev->struct_sem );

        DRM(free)( priv, sizeof(*priv), DRM_MEM_FILES );

        /* ========================================================
         * End inline drm_release
         */

        atomic_inc( &dev->counts[_DRM_STAT_CLOSES] );
        spin_lock( &dev->count_lock );
        if ( !--dev->open_count ) {
                if ( atomic_read( &dev->ioctl_count ) || dev->blocked ) {
                        DRM_ERROR( "Device busy: %d %d\n",
                                   atomic_read( &dev->ioctl_count ),
                                   dev->blocked );
                        spin_unlock( &dev->count_lock );
                        unlock_kernel();
                        return -EBUSY;
                }
                spin_unlock( &dev->count_lock );
                unlock_kernel();
                return DRM(takedown)( dev );
        }
        spin_unlock( &dev->count_lock );

        unlock_kernel();
        return retcode;
}

/* DRM(ioctl) is called whenever a process performs an ioctl on /dev/drm.
 */
int DRM(ioctl)( struct inode *inode, struct file *filp,
                unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->dev;
        drm_ioctl_desc_t *ioctl;
        drm_ioctl_t *func;
        int nr = DRM_IOCTL_NR(cmd);
        int retcode = 0;

        atomic_inc( &dev->ioctl_count );
        atomic_inc( &dev->counts[_DRM_STAT_IOCTLS] );
        ++priv->ioctl_count;

        DRM_DEBUG( "pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%x, auth=%d\n",
                   current->pid, cmd, nr, dev->device, priv->authenticated );

        if ( nr >= DRIVER_IOCTL_COUNT ) {
                retcode = -EINVAL;
        } else {
                ioctl = &DRM(ioctls)[nr];
                func = ioctl->func;

                if ( !func ) {
                        DRM_DEBUG( "no function\n" );
                        retcode = -EINVAL;
                } else if ( ( ioctl->root_only && !capable( CAP_SYS_ADMIN ) ) ||
                            ( ioctl->auth_needed && !priv->authenticated ) ) {
                        retcode = -EACCES;
                } else {
                        retcode = func( inode, filp, cmd, arg );
                }
        }

        atomic_dec( &dev->ioctl_count );
        return retcode;
}

int DRM(lock)( struct inode *inode, struct file *filp,
               unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->dev;
        DECLARE_WAITQUEUE( entry, current );
        drm_lock_t lock;
        int ret = 0;
#if __HAVE_MULTIPLE_DMA_QUEUES
        drm_queue_t *q;
#endif
#if __HAVE_DMA_HISTOGRAM
        cycles_t start;

        dev->lck_start = start = get_cycles();
#endif

        if ( copy_from_user( &lock, (drm_lock_t *)arg, sizeof(lock) ) )
                return -EFAULT;

        if ( lock.context == DRM_KERNEL_CONTEXT ) {
                DRM_ERROR( "Process %d using kernel context %d\n",
                           current->pid, lock.context );
                return -EINVAL;
        }

        DRM_DEBUG( "%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
                   lock.context, current->pid,
                   dev->lock.hw_lock->lock, lock.flags );

#if __HAVE_DMA_QUEUE
        if ( lock.context < 0 )
                return -EINVAL;
#elif __HAVE_MULTIPLE_DMA_QUEUES
        if ( lock.context < 0 || lock.context >= dev->queue_count )
                return -EINVAL;
        q = dev->queuelist[lock.context];
#endif

#if __HAVE_DMA_FLUSH
        ret = DRM(flush_block_and_flush)( dev, lock.context, lock.flags );
#endif
        if ( !ret ) {
                add_wait_queue( &dev->lock.lock_queue, &entry );
                for (;;) {
                        current->state = TASK_INTERRUPTIBLE;
                        if ( !dev->lock.hw_lock ) {
                                /* Device has been unregistered */
                                ret = -EINTR;
                                break;
                        }
                        if ( DRM(lock_take)( &dev->lock.hw_lock->lock,
                                             lock.context ) ) {
                                dev->lock.pid       = current->pid;
                                dev->lock.lock_time = jiffies;
                                atomic_inc( &dev->counts[_DRM_STAT_LOCKS] );
                                break;  /* Got lock */
                        }

                        /* Contention */
                        schedule();
                        if ( signal_pending( current ) ) {
                                ret = -ERESTARTSYS;
                                break;
                        }
                }
                current->state = TASK_RUNNING;
                remove_wait_queue( &dev->lock.lock_queue, &entry );
        }

#if __HAVE_DMA_FLUSH
        DRM(flush_unblock)( dev, lock.context, lock.flags ); /* cleanup phase */
#endif

        if ( !ret ) {
                sigemptyset( &dev->sigmask );
                sigaddset( &dev->sigmask, SIGSTOP );
                sigaddset( &dev->sigmask, SIGTSTP );
                sigaddset( &dev->sigmask, SIGTTIN );
                sigaddset( &dev->sigmask, SIGTTOU );
                dev->sigdata.context = lock.context;
                dev->sigdata.lock    = dev->lock.hw_lock;
                block_all_signals( DRM(notifier),
                                   &dev->sigdata, &dev->sigmask );

#if __HAVE_DMA_READY
                if ( lock.flags & _DRM_LOCK_READY ) {
                        DRIVER_DMA_READY();
                }
#endif
#if __HAVE_DMA_QUIESCENT
                if ( lock.flags & _DRM_LOCK_QUIESCENT ) {
                        DRIVER_DMA_QUIESCENT();
                }
#endif
#if __HAVE_KERNEL_CTX_SWITCH
                if ( dev->last_context != lock.context ) {
                        DRM(context_switch)(dev, dev->last_context,
                                            lock.context);
                }
#endif
        }

        DRM_DEBUG( "%d %s\n", lock.context, ret ? "interrupted" : "has lock" );

#if __HAVE_DMA_HISTOGRAM
        atomic_inc(&dev->histo.lacq[DRM(histogram_slot)(get_cycles()-start)]);
#endif
        return ret;
}


int DRM(unlock)( struct inode *inode, struct file *filp,
                 unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->dev;
        drm_lock_t lock;

        if ( copy_from_user( &lock, (drm_lock_t *)arg, sizeof(lock) ) )
                return -EFAULT;

        if ( lock.context == DRM_KERNEL_CONTEXT ) {
                DRM_ERROR( "Process %d using kernel context %d\n",
                           current->pid, lock.context );
                return -EINVAL;
        }

        atomic_inc( &dev->counts[_DRM_STAT_UNLOCKS] );

#if __HAVE_KERNEL_CTX_SWITCH
        /* We no longer really hold it, but if we are the next
         * agent to request it then we should just be able to
         * take it immediately and not eat the ioctl.
         */
        dev->lock.pid = 0;
        {
                __volatile__ unsigned int *plock = &dev->lock.hw_lock->lock;
                unsigned int old, new, prev, ctx;

                ctx = lock.context;
                do {
                        old  = *plock;
                        new  = ctx;
                        prev = cmpxchg(plock, old, new);
                } while (prev != old);
        }
        wake_up_interruptible(&dev->lock.lock_queue);
#else
        DRM(lock_transfer)( dev, &dev->lock.hw_lock->lock,
                            DRM_KERNEL_CONTEXT );
#if __HAVE_DMA_SCHEDULE
        DRM(dma_schedule)( dev, 1 );
#endif

        /* FIXME: Do we ever really need to check this???
         */
        if ( 1 /* !dev->context_flag */ ) {
                if ( DRM(lock_free)( dev, &dev->lock.hw_lock->lock,
                                     DRM_KERNEL_CONTEXT ) ) {
                        DRM_ERROR( "\n" );
                }
        }
#endif /* !__HAVE_KERNEL_CTX_SWITCH */

        unblock_all_signals();
        return 0;
}