/*
 * Copyright 2005-2006 Stephane Marchesin
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nouveau_dma.h"

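/* Build the ctxdma the GPU fetches push buffer commands through, targeting
 * whichever memory type the buffer actually lives in, and record the
 * buffer's offset within it as the channel's pushbuf_base.
 */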
static int
nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_bo *pb = chan->pushbuf_bo;
	struct nouveau_gpuobj *pushbuf = NULL;
	int ret;

	if (dev_priv->card_type >= NV_50) {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
					     dev_priv->vm_end, NV_DMA_ACCESS_RO,
					     NV_DMA_TARGET_AGP, &pushbuf);
		chan->pushbuf_base = pb->bo.offset;
	} else
	if (pb->bo.mem.mem_type == TTM_PL_TT) {
		ret = nouveau_gpuobj_gart_dma_new(chan, 0,
						  dev_priv->gart_info.aper_size,
						  NV_DMA_ACCESS_RO, &pushbuf,
						  NULL);
		chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
	} else
	if (dev_priv->card_type != NV_04) {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
					     dev_priv->fb_available_size,
					     NV_DMA_ACCESS_RO,
					     NV_DMA_TARGET_VIDMEM, &pushbuf);
		chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
	} else {
		/* NV04 cmdbuf hack, from original ddx.. not sure of its
		 * exact reason for existing :)  PCI access to cmdbuf in
		 * VRAM.
		 */
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     pci_resource_start(dev->pdev, 1),
					     dev_priv->fb_available_size,
					     NV_DMA_ACCESS_RO,
					     NV_DMA_TARGET_PCI, &pushbuf);
		chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
	}
	/* ret was previously computed but never checked, silently ignoring a
	 * failed ctxdma allocation; propagate the error instead.
	 */
	if (ret) {
		NV_ERROR(dev, "error creating push buffer ctxdma: %d\n", ret);
		return ret;
	}

	nouveau_gpuobj_ref(pushbuf, &chan->pushbuf);
	nouveau_gpuobj_ref(NULL, &pushbuf);
	return 0;
}

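/* Allocate and pin the 64KiB buffer object userspace writes commands into,
 * placed in VRAM or GART according to the nouveau_vram_pushbuf module
 * option.
 */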
static struct nouveau_bo *
nouveau_channel_user_pushbuf_alloc(struct drm_device *dev)
{
	struct nouveau_bo *pushbuf = NULL;
	int location, ret;

	if (nouveau_vram_pushbuf)
		location = TTM_PL_FLAG_VRAM;
	else
		location = TTM_PL_FLAG_TT;

	ret = nouveau_bo_new(dev, NULL, 65536, 0, location, 0, 0x0000, false,
			     true, &pushbuf);
	if (ret) {
		NV_ERROR(dev, "error allocating DMA push buffer: %d\n", ret);
		return NULL;
	}

	ret = nouveau_bo_pin(pushbuf, location);
	if (ret) {
		NV_ERROR(dev, "error pinning DMA push buffer: %d\n", ret);
		nouveau_bo_ref(NULL, &pushbuf);
		return NULL;
	}

	return pushbuf;
}

/* Allocates and initialises a fifo for userspace consumption.  On success
 * the new channel is returned with a reference held and chan->mutex locked;
 * nouveau_channel_put() releases both.
 */
int
nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
		      struct drm_file *file_priv,
		      uint32_t vram_handle, uint32_t gart_handle)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	struct nouveau_channel *chan;
	unsigned long flags;
	int user, ret;

	/* allocate and lock channel structure */
	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return -ENOMEM;
	chan->dev = dev;
	chan->file_priv = file_priv;
	chan->vram_handle = vram_handle;
	chan->gart_handle = gart_handle;

	atomic_set(&chan->refcount, 1);
	mutex_init(&chan->mutex);
	mutex_lock(&chan->mutex);

	/* allocate hw channel id */
	spin_lock_irqsave(&dev_priv->channels.lock, flags);
	for (chan->id = 0; chan->id < pfifo->channels; chan->id++) {
		if (!dev_priv->channels.ptr[chan->id]) {
			dev_priv->channels.ptr[chan->id] = chan;
			break;
		}
	}
	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);

	if (chan->id == pfifo->channels) {
		mutex_unlock(&chan->mutex);
		kfree(chan);
		return -ENODEV;
	}

	NV_DEBUG(dev, "initialising channel %d\n", chan->id);
	INIT_LIST_HEAD(&chan->nvsw.vbl_wait);
	INIT_LIST_HEAD(&chan->fence.pending);

	/* Allocate DMA push buffer */
	chan->pushbuf_bo = nouveau_channel_user_pushbuf_alloc(dev);
	if (!chan->pushbuf_bo) {
		ret = -ENOMEM;
		NV_ERROR(dev, "pushbuf %d\n", ret);
		nouveau_channel_put(&chan);
		return ret;
	}

	nouveau_dma_pre_init(chan);

	/* Locate channel's user control regs */
	if (dev_priv->card_type < NV_40)
		user = NV03_USER(chan->id);
	else
	if (dev_priv->card_type < NV_50)
		user = NV40_USER(chan->id);
	else
		user = NV50_USER(chan->id);

	chan->user = ioremap(pci_resource_start(dev->pdev, 0) + user,
			     PAGE_SIZE);
	if (!chan->user) {
		NV_ERROR(dev, "ioremap of regs failed.\n");
		nouveau_channel_put(&chan);
		return -ENOMEM;
	}
	chan->user_put = 0x40;
	chan->user_get = 0x44;

	/* Allocate space for per-channel fixed notifier memory */
	ret = nouveau_notifier_init_channel(chan);
	if (ret) {
		NV_ERROR(dev, "ntfy %d\n", ret);
		nouveau_channel_put(&chan);
		return ret;
	}

	/* Setup channel's default objects */
	ret = nouveau_gpuobj_channel_init(chan, vram_handle, gart_handle);
	if (ret) {
		NV_ERROR(dev, "gpuobj %d\n", ret);
		nouveau_channel_put(&chan);
		return ret;
	}

	/* Create a dma object for the push buffer */
	ret = nouveau_channel_pushbuf_ctxdma_init(chan);
	if (ret) {
		NV_ERROR(dev, "pbctxdma %d\n", ret);
		nouveau_channel_put(&chan);
		return ret;
	}

	/* disable the fifo caches */
	pfifo->reassign(dev, false);

	/* Create a graphics context for new channel */
	ret = pgraph->create_context(chan);
	if (ret) {
		nouveau_channel_put(&chan);
		return ret;
	}

	/* Construct initial RAMFC for new channel */
	ret = pfifo->create_context(chan);
	if (ret) {
		nouveau_channel_put(&chan);
		return ret;
	}

	pfifo->reassign(dev, true);

	ret = nouveau_dma_init(chan);
	if (!ret)
		ret = nouveau_fence_channel_init(chan);
	if (ret) {
		nouveau_channel_put(&chan);
		return ret;
	}

	nouveau_debugfs_channel_init(chan);

	NV_DEBUG(dev, "channel %d initialised\n", chan->id);
	*chan_ret = chan;
	return 0;
}

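/* Look up channel 'id' and take a reference on it.  On success the channel
 * is returned with its refcount raised and chan->mutex held; drop both with
 * nouveau_channel_put().  If file_priv is non-NULL the channel must belong
 * to that client.  This is the pattern the ioctls below follow:
 *
 *	chan = nouveau_channel_get(dev, file_priv, id);
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 *	...
 *	nouveau_channel_put(&chan);
 */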
struct nouveau_channel *
nouveau_channel_get(struct drm_device *dev, struct drm_file *file_priv, int id)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->channels.lock, flags);
	chan = dev_priv->channels.ptr[id];

	if (unlikely(!chan || atomic_read(&chan->refcount) == 0)) {
		spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
		return ERR_PTR(-EINVAL);
	}

	if (unlikely(file_priv && chan->file_priv != file_priv)) {
		spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
		return ERR_PTR(-EINVAL);
	}

	atomic_inc(&chan->refcount);
	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);

	mutex_lock(&chan->mutex);
	return chan;
}

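/* Drop the mutex taken by nouveau_channel_get() and release one reference.
 * When the last reference goes away the channel is idled, kicked off the
 * hardware, and freed.
 */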
void
nouveau_channel_put(struct nouveau_channel **pchan)
{
	struct nouveau_channel *chan = *pchan;
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
	unsigned long flags;
	int ret;

	/* unlock the channel */
	mutex_unlock(&chan->mutex);

	/* decrement the refcount, and we're done if there are still refs */
	if (likely(!atomic_dec_and_test(&chan->refcount))) {
		*pchan = NULL;
		return;
	}

	/* no one wants the channel anymore */
	NV_DEBUG(dev, "freeing channel %d\n", chan->id);
	nouveau_debugfs_channel_fini(chan);
	*pchan = NULL;

	/* give it a chance to idle */
	nouveau_fence_update(chan);
	if (chan->fence.sequence != chan->fence.sequence_ack) {
		struct nouveau_fence *fence = NULL;

		ret = nouveau_fence_new(chan, &fence, true);
		if (ret == 0) {
			ret = nouveau_fence_wait(fence, NULL, false, false);
			nouveau_fence_unref((void *)&fence);
		}

		if (ret)
			NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id);
	}

	/* ensure all outstanding fences are signaled.  they should be if the
	 * above attempts at idling were OK, but if we failed this'll tell TTM
	 * we're done with the buffers.
	 */
	nouveau_fence_channel_fini(chan);

	/* boot it off the hardware */
	pfifo->reassign(dev, false);

	/* We want to give pgraph a chance to idle and get rid of all potential
	 * errors. We need to do this before the lock, otherwise the irq handler
	 * is unable to process them.
	 */
	if (pgraph->channel(dev) == chan)
		nouveau_wait_for_idle(dev);

	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);

	pgraph->fifo_access(dev, false);
	if (pgraph->channel(dev) == chan)
		pgraph->unload_context(dev);
	pgraph->destroy_context(chan);
	pgraph->fifo_access(dev, true);

	if (pfifo->channel_id(dev) == chan->id) {
		pfifo->disable(dev);
		pfifo->unload_context(dev);
		pfifo->enable(dev);
	}
	pfifo->destroy_context(chan);

	pfifo->reassign(dev, true);

	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);

	/* aside from its resources, the channel should now be dead;
	 * remove it from the channel list
	 */
	spin_lock_irqsave(&dev_priv->channels.lock, flags);
	dev_priv->channels.ptr[chan->id] = NULL;
	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);

	/* destroy any resources the channel owned */
	nouveau_gpuobj_ref(NULL, &chan->pushbuf);
	if (chan->pushbuf_bo) {
		nouveau_bo_unmap(chan->pushbuf_bo);
		nouveau_bo_unpin(chan->pushbuf_bo);
		nouveau_bo_ref(NULL, &chan->pushbuf_bo);
	}
	nouveau_gpuobj_channel_takedown(chan);
	nouveau_notifier_takedown_channel(chan);
	if (chan->user)
		iounmap(chan->user);

	kfree(chan);
}

/* cleans up all the fifos from file_priv */
void
nouveau_channel_cleanup(struct drm_device *dev, struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine = &dev_priv->engine;
	struct nouveau_channel *chan;
	int i;

	NV_DEBUG(dev, "clearing FIFO enables from file_priv\n");
	for (i = 0; i < engine->fifo.channels; i++) {
		chan = nouveau_channel_get(dev, file_priv, i);
		if (IS_ERR(chan))
			continue;

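		/* drop the reference userspace holds, then our own */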
		atomic_dec(&chan->refcount);
		nouveau_channel_put(&chan);
	}
}

/***********************************
 * ioctls wrapping the functions
 ***********************************/

static int
nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_nouveau_channel_alloc *init = data;
	struct nouveau_channel *chan;
	int ret;

	if (dev_priv->engine.graph.accel_blocked)
		return -ENODEV;

	if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0)
		return -EINVAL;

	ret = nouveau_channel_alloc(dev, &chan, file_priv,
				    init->fb_ctxdma_handle,
				    init->tt_ctxdma_handle);
	if (ret)
		return ret;
	init->channel = chan->id;

	if (chan->dma.ib_max)
		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM |
					NOUVEAU_GEM_DOMAIN_GART;
	else if (chan->pushbuf_bo->bo.mem.mem_type == TTM_PL_VRAM)
		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM;
	else
		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;

	init->subchan[0].handle = NvM2MF;
	if (dev_priv->card_type < NV_50)
		init->subchan[0].grclass = 0x0039;
	else
		init->subchan[0].grclass = 0x5039;
	init->subchan[1].handle = NvSw;
	init->subchan[1].grclass = NV_SW;
	init->nr_subchan = 2;

	/* Named memory object area */
	ret = drm_gem_handle_create(file_priv, chan->notifier_bo->gem,
				    &init->notifier_handle);

	if (ret == 0)
		atomic_inc(&chan->refcount); /* userspace reference */
	nouveau_channel_put(&chan);
	return ret;
}

static int
nouveau_ioctl_fifo_free(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_nouveau_channel_free *req = data;
	struct nouveau_channel *chan;

	chan = nouveau_channel_get(dev, file_priv, req->channel);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

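	/* drop the userspace reference; the final put may free the channel */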
	atomic_dec(&chan->refcount);
	nouveau_channel_put(&chan);
	return 0;
}

/***********************************
 * finally, the ioctl table
 ***********************************/

struct drm_ioctl_desc nouveau_ioctls[] = {
	DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_AUTH),
};

int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls);