/*
 * Copyright 2005-2006 Stephane Marchesin
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nouveau_dma.h"

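/*
 * Build the DMA object ("ctxdma") the channel's FIFO uses to fetch commands
 * from its push buffer, and reference it as chan->pushbuf.  The aperture it
 * describes depends on where the buffer was placed: the GART aperture for TT
 * placements, all of VRAM on everything except NV04, or a PCI view of VRAM
 * through the card's framebuffer BAR (resource 1) for the NV04 case below.
 */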
static int
nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_bo *pb = chan->pushbuf_bo;
	struct nouveau_gpuobj *pushbuf = NULL;
	uint32_t start = pb->bo.mem.mm_node->start << PAGE_SHIFT;
	int ret;

	if (pb->bo.mem.mem_type == TTM_PL_TT) {
		ret = nouveau_gpuobj_gart_dma_new(chan, 0,
						  dev_priv->gart_info.aper_size,
						  NV_DMA_ACCESS_RO, &pushbuf,
						  NULL);
		chan->pushbuf_base = start;
	} else
	if (dev_priv->card_type != NV_04) {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
					     dev_priv->fb_available_size,
					     NV_DMA_ACCESS_RO,
					     NV_DMA_TARGET_VIDMEM, &pushbuf);
		chan->pushbuf_base = start;
	} else {
		/* NV04 cmdbuf hack, from the original ddx.. not sure of its
		 * exact reason for existing :)  PCI access to cmdbuf in
		 * VRAM.
		 */
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     drm_get_resource_start(dev, 1),
					     dev_priv->fb_available_size,
					     NV_DMA_ACCESS_RO,
					     NV_DMA_TARGET_PCI, &pushbuf);
		chan->pushbuf_base = start;
	}

	ret = nouveau_gpuobj_ref_add(dev, chan, 0, pushbuf, &chan->pushbuf);
	if (ret) {
		NV_ERROR(dev, "Error referencing pushbuf ctxdma: %d\n", ret);
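		/* The GART ctxdma may be the shared sg_ctxdma object; only
		 * destroy it here if it was created just for this channel.
		 */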
		if (pushbuf != dev_priv->gart_info.sg_ctxdma)
			nouveau_gpuobj_del(dev, &pushbuf);
		return ret;
	}

	return 0;
}

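/*
 * Allocate and pin the 64KiB buffer object userspace fills with commands.
 * It normally lives in GART memory; the nouveau_vram_pushbuf module option
 * forces it into VRAM instead.
 */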
static struct nouveau_bo *
nouveau_channel_user_pushbuf_alloc(struct drm_device *dev)
{
	struct nouveau_bo *pushbuf = NULL;
	int location, ret;

	if (nouveau_vram_pushbuf)
		location = TTM_PL_FLAG_VRAM;
	else
		location = TTM_PL_FLAG_TT;

	ret = nouveau_bo_new(dev, NULL, 65536, 0, location, 0, 0x0000, false,
			     true, &pushbuf);
	if (ret) {
		NV_ERROR(dev, "error allocating DMA push buffer: %d\n", ret);
		return NULL;
	}

	ret = nouveau_bo_pin(pushbuf, location);
	if (ret) {
		NV_ERROR(dev, "error pinning DMA push buffer: %d\n", ret);
		nouveau_bo_ref(NULL, &pushbuf);
		return NULL;
	}

	return pushbuf;
}

/* allocates and initializes a fifo for user space consumption */
int
nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
		      struct drm_file *file_priv,
		      uint32_t vram_handle, uint32_t tt_handle)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	struct nouveau_channel *chan;
	int channel, user;
	int ret;

	/*
	 * Alright, here is the full story:
	 * NVIDIA cards have multiple hw fifo contexts (praise them for that,
	 * no complicated crash-prone context switches).
	 * We allocate a new context for each app and let it write to it
	 * directly (woo, full userspace command submission!).
	 * When there are no more contexts, you lose.
	 */
	for (channel = 0; channel < pfifo->channels; channel++) {
		if (dev_priv->fifos[channel] == NULL)
			break;
	}

	/* no more fifos. you lose. */
	if (channel == pfifo->channels)
		return -EINVAL;

	dev_priv->fifos[channel] = kzalloc(sizeof(struct nouveau_channel),
					   GFP_KERNEL);
	if (!dev_priv->fifos[channel])
		return -ENOMEM;
	dev_priv->fifo_alloc_count++;
	chan = dev_priv->fifos[channel];
	INIT_LIST_HEAD(&chan->nvsw.vbl_wait);
	INIT_LIST_HEAD(&chan->fence.pending);
	chan->dev = dev;
	chan->id = channel;
	chan->file_priv = file_priv;
	chan->vram_handle = vram_handle;
	chan->gart_handle = tt_handle;

	NV_INFO(dev, "Allocating FIFO number %d\n", channel);

	/* Allocate DMA push buffer */
	chan->pushbuf_bo = nouveau_channel_user_pushbuf_alloc(dev);
	if (!chan->pushbuf_bo) {
		ret = -ENOMEM;
		NV_ERROR(dev, "pushbuf %d\n", ret);
		nouveau_channel_free(chan);
		return ret;
	}

	nouveau_dma_pre_init(chan);

	/* Locate channel's user control regs */
	if (dev_priv->card_type < NV_40)
		user = NV03_USER(channel);
	else
	if (dev_priv->card_type < NV_50)
		user = NV40_USER(channel);
	else
		user = NV50_USER(channel);

	chan->user = ioremap(pci_resource_start(dev->pdev, 0) + user,
			     PAGE_SIZE);
	if (!chan->user) {
		NV_ERROR(dev, "ioremap of regs failed.\n");
		nouveau_channel_free(chan);
		return -ENOMEM;
	}
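	/* Byte offsets of the DMA put/get pointers within the channel's
	 * user control window mapped above.
	 */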
	chan->user_put = 0x40;
	chan->user_get = 0x44;

	/* Allocate space for per-channel fixed notifier memory */
	ret = nouveau_notifier_init_channel(chan);
	if (ret) {
		NV_ERROR(dev, "ntfy %d\n", ret);
		nouveau_channel_free(chan);
		return ret;
	}

	/* Setup channel's default objects */
	ret = nouveau_gpuobj_channel_init(chan, vram_handle, tt_handle);
	if (ret) {
		NV_ERROR(dev, "gpuobj %d\n", ret);
		nouveau_channel_free(chan);
		return ret;
	}

	/* Create a dma object for the push buffer */
	ret = nouveau_channel_pushbuf_ctxdma_init(chan);
	if (ret) {
		NV_ERROR(dev, "pbctxdma %d\n", ret);
		nouveau_channel_free(chan);
		return ret;
	}

	/* disable the fifo caches */
	pfifo->reassign(dev, false);

	/* Create a graphics context for new channel */
	ret = pgraph->create_context(chan);
	if (ret) {
		nouveau_channel_free(chan);
		return ret;
	}

	/* Construct initial RAMFC for new channel */
	ret = pfifo->create_context(chan);
	if (ret) {
		nouveau_channel_free(chan);
		return ret;
	}

	pfifo->reassign(dev, true);

	ret = nouveau_dma_init(chan);
	if (!ret)
		ret = nouveau_fence_init(chan);
	if (ret) {
		nouveau_channel_free(chan);
		return ret;
	}

	nouveau_debugfs_channel_init(chan);

	NV_INFO(dev, "%s: initialised FIFO %d\n", __func__, channel);
	*chan_ret = chan;
	return 0;
}

/* stops a fifo */
void
nouveau_channel_free(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	unsigned long flags;
	int ret;

	NV_INFO(dev, "%s: freeing fifo %d\n", __func__, chan->id);

	nouveau_debugfs_channel_fini(chan);

	/* Give outstanding push buffers a chance to complete */
	spin_lock_irqsave(&chan->fence.lock, flags);
	nouveau_fence_update(chan);
	spin_unlock_irqrestore(&chan->fence.lock, flags);
	if (chan->fence.sequence != chan->fence.sequence_ack) {
		struct nouveau_fence *fence = NULL;

		ret = nouveau_fence_new(chan, &fence, true);
		if (ret == 0) {
			ret = nouveau_fence_wait(fence, NULL, false, false);
			nouveau_fence_unref((void *)&fence);
		}

		if (ret)
			NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id);
	}

	/* Ensure all outstanding fences are signaled.  They should be if the
	 * above attempts at idling were OK, but if we failed this'll tell TTM
	 * we're done with the buffers.
	 */
	nouveau_fence_fini(chan);

	/* This will prevent pfifo from switching channels. */
	pfifo->reassign(dev, false);

	/* We want to give pgraph a chance to idle and get rid of all potential
	 * errors. We need to do this before the lock, otherwise the irq handler
	 * is unable to process them.
	 */
	if (pgraph->channel(dev) == chan)
		nouveau_wait_for_idle(dev);

	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);

	pgraph->fifo_access(dev, false);
	if (pgraph->channel(dev) == chan)
		pgraph->unload_context(dev);
	pgraph->destroy_context(chan);
	pgraph->fifo_access(dev, true);

	if (pfifo->channel_id(dev) == chan->id) {
		pfifo->disable(dev);
		pfifo->unload_context(dev);
		pfifo->enable(dev);
	}
	pfifo->destroy_context(chan);

	pfifo->reassign(dev, true);

	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);

	/* Release the channel's resources */
	nouveau_gpuobj_ref_del(dev, &chan->pushbuf);
	if (chan->pushbuf_bo) {
		nouveau_bo_unpin(chan->pushbuf_bo);
		nouveau_bo_ref(NULL, &chan->pushbuf_bo);
	}
	nouveau_gpuobj_channel_takedown(chan);
	nouveau_notifier_takedown_channel(chan);
	if (chan->user)
		iounmap(chan->user);

	dev_priv->fifos[chan->id] = NULL;
	dev_priv->fifo_alloc_count--;
	kfree(chan);
}

/* cleans up all the fifos from file_priv */
void
nouveau_channel_cleanup(struct drm_device *dev, struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine = &dev_priv->engine;
	int i;

	NV_DEBUG(dev, "clearing FIFO enables from file_priv\n");
	for (i = 0; i < engine->fifo.channels; i++) {
		struct nouveau_channel *chan = dev_priv->fifos[i];

		if (chan && chan->file_priv == file_priv)
			nouveau_channel_free(chan);
	}
}

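/* returns true if the given channel exists and is owned by file_priv */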
int
nouveau_channel_owner(struct drm_device *dev, struct drm_file *file_priv,
		      int channel)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine = &dev_priv->engine;

	if (channel >= engine->fifo.channels)
		return 0;
	if (dev_priv->fifos[channel] == NULL)
		return 0;

	return (dev_priv->fifos[channel]->file_priv == file_priv);
}

/***********************************
 * ioctls wrapping the functions
 ***********************************/

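/* DRM_NOUVEAU_CHANNEL_ALLOC: create a channel and report back its id, the
 * default subchannel bindings and a GEM handle for the notifier memory.
 */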
static int
nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_nouveau_channel_alloc *init = data;
	struct nouveau_channel *chan;
	int ret;

	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;

	if (dev_priv->engine.graph.accel_blocked)
		return -ENODEV;

	if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0)
		return -EINVAL;

	ret = nouveau_channel_alloc(dev, &chan, file_priv,
				    init->fb_ctxdma_handle,
				    init->tt_ctxdma_handle);
	if (ret)
		return ret;
	init->channel = chan->id;

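	/* The subchannel bindings userspace is expected to use: M2MF
	 * (class 0x0039, or 0x5039 on NV50) and the NvSw software object.
	 */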
	init->subchan[0].handle = NvM2MF;
	if (dev_priv->card_type < NV_50)
		init->subchan[0].grclass = 0x0039;
	else
		init->subchan[0].grclass = 0x5039;
	init->subchan[1].handle = NvSw;
	init->subchan[1].grclass = NV_SW;
	init->nr_subchan = 2;

	/* Named memory object area */
	ret = drm_gem_handle_create(file_priv, chan->notifier_bo->gem,
				    &init->notifier_handle);
	if (ret) {
		nouveau_channel_free(chan);
		return ret;
	}

	return 0;
}

static int
nouveau_ioctl_fifo_free(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_nouveau_channel_free *cfree = data;
	struct nouveau_channel *chan;

	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(cfree->channel, file_priv, chan);

	nouveau_channel_free(chan);
	return 0;
}

/***********************************
 * finally, the ioctl table
 ***********************************/

struct drm_ioctl_desc nouveau_ioctls[] = {
	DRM_IOCTL_DEF(DRM_NOUVEAU_CARD_INIT, nouveau_ioctl_card_init, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PUSHBUF_CALL, nouveau_gem_ioctl_pushbuf_call, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PIN, nouveau_gem_ioctl_pin, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_UNPIN, nouveau_gem_ioctl_unpin, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PUSHBUF_CALL2, nouveau_gem_ioctl_pushbuf_call2, DRM_AUTH),
};

int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls);