/*
 * Copyright (C) 2006 Ben Skeggs.
 *
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

/*
 * Authors:
 *   Ben Skeggs <darktama@iinet.net.au>
 */

#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nouveau_ramht.h"

/* NVidia uses context objects to drive drawing operations.

   Context objects can be selected into 8 subchannels in the FIFO,
   and then used via DMA command buffers.

   A context object is referenced by a user defined handle (CARD32). The HW
   looks up graphics objects in a hash table in the instance RAM.

   An entry in the hash table consists of 2 CARD32: the first CARD32 contains
   the handle, the second one a bitfield that contains the address of the
   object in instance RAM.

   The format of the second CARD32 seems to be:

   NV4 to NV30:

   15: 0 instance_addr >> 4
   17:16 engine (here uses 1 = graphics)
   28:24 channel id (here uses 0)
   31    valid (use 1)

   NV40:

   15: 0 instance_addr >> 4 (maybe 19-0)
   21:20 engine (here uses 1 = graphics)
   I'm unsure about the other bits, but using 0 seems to work.

   The key into the hash table depends on the object handle and channel id;
   the hash itself now lives in nouveau_ramht.c, see the sketch below.
*/

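/* A minimal sketch of that hash, assuming the XOR-folding scheme the RAMHT
 * code (nouveau_ramht.c) uses: the 32-bit handle is folded down to the
 * table's index width, and pre-NV50 chips mix in the channel id.  "bits"
 * here stands for log2 of the number of RAMHT entries.  Illustrative only.
 */
#if 0	/* illustrative sketch, not compiled */
static uint32_t
example_ramht_hash(uint32_t handle, int chan_id, int bits, int card_type)
{
	uint32_t hash = 0;
	int i;

	/* XOR-fold the 32-bit handle down to "bits" bits */
	for (i = 32; i > 0; i -= bits) {
		hash ^= (handle & ((1 << bits) - 1));
		handle >>= bits;
	}

	if (card_type < NV_50)
		hash ^= chan_id << (bits - 4);

	return hash << 3;	/* each hash table entry is 8 bytes */
}
#endif
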
int
nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
		   uint32_t size, int align, uint32_t flags,
		   struct nouveau_gpuobj **gpuobj_ret)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine = &dev_priv->engine;
	struct nouveau_gpuobj *gpuobj;
	struct drm_mm_node *ramin = NULL;
	int ret;

	NV_DEBUG(dev, "ch%d size=%u align=%d flags=0x%08x\n",
		 chan ? chan->id : -1, size, align, flags);

	if (!dev_priv || !gpuobj_ret || *gpuobj_ret != NULL)
		return -EINVAL;

	gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
	if (!gpuobj)
		return -ENOMEM;
	NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
	gpuobj->dev = dev;
	gpuobj->flags = flags;
	gpuobj->refcount = 1;
	gpuobj->size = size;

	list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);

	if (chan) {
		NV_DEBUG(dev, "channel heap\n");

		ramin = drm_mm_search_free(&chan->ramin_heap, size, align, 0);
		if (ramin)
			ramin = drm_mm_get_block(ramin, size, align);

		if (!ramin) {
			nouveau_gpuobj_ref(NULL, &gpuobj);
			return -ENOMEM;
		}
	} else {
		NV_DEBUG(dev, "global heap\n");

		/* allocate backing pages, sets vinst */
		ret = engine->instmem.populate(dev, gpuobj, &size);
		if (ret) {
			nouveau_gpuobj_ref(NULL, &gpuobj);
			return ret;
		}

		/* try and get aperture space */
		ramin = drm_mm_search_free(&dev_priv->ramin_heap, size, align, 0);
		if (ramin)
			ramin = drm_mm_get_block(ramin, size, align);

		/* on nv50 it's ok to fail, we have a fallback path */
		if (!ramin && dev_priv->card_type < NV_50) {
			nouveau_gpuobj_ref(NULL, &gpuobj);
			return -ENOMEM;
		}
	}

	/* if we got a chunk of the aperture, map pages into it */
	gpuobj->im_pramin = ramin;
	if (!chan && gpuobj->im_pramin) {
		ret = engine->instmem.bind(dev, gpuobj);
		if (ret) {
			nouveau_gpuobj_ref(NULL, &gpuobj);
			return ret;
		}
	}

	/* calculate the various different addresses for the object */
	if (chan) {
		gpuobj->pinst = chan->ramin->pinst;
		if (gpuobj->pinst != ~0)
			gpuobj->pinst += gpuobj->im_pramin->start;

		if (dev_priv->card_type < NV_50) {
			gpuobj->cinst = gpuobj->pinst;
		} else {
			gpuobj->cinst = gpuobj->im_pramin->start;
			gpuobj->vinst = gpuobj->im_pramin->start +
					chan->ramin->vinst;
		}
	} else {
		if (gpuobj->im_pramin)
			gpuobj->pinst = gpuobj->im_pramin->start;
		else
			gpuobj->pinst = ~0;
		gpuobj->cinst = 0xdeadbeef;
	}

	if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
		int i;

		for (i = 0; i < gpuobj->size; i += 4)
			nv_wo32(gpuobj, i, 0);
		engine->instmem.flush(dev);
	}

	*gpuobj_ret = gpuobj;
	return 0;
}

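/* Typical use, mirroring the RAMHT allocation in
 * nouveau_gpuobj_channel_init() later in this file: allocate a zeroed,
 * 16-byte-aligned object in the channel's instance heap, and drop it again
 * with nouveau_gpuobj_ref(NULL, ...).  A sketch; error handling trimmed.
 */
#if 0	/* illustrative sketch, not compiled */
	struct nouveau_gpuobj *obj = NULL;
	int ret;

	ret = nouveau_gpuobj_new(dev, chan, 0x8000, 16,
				 NVOBJ_FLAG_ZERO_ALLOC, &obj);
	if (ret == 0)
		nouveau_gpuobj_ref(NULL, &obj);	/* drop the initial ref */
#endif
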
int
nouveau_gpuobj_early_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	NV_DEBUG(dev, "\n");

	INIT_LIST_HEAD(&dev_priv->gpuobj_list);
	spin_lock_init(&dev_priv->ramin_lock);
	dev_priv->ramin_base = ~0;

	return 0;
}

int
nouveau_gpuobj_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *ramht = NULL;
	int ret;

	NV_DEBUG(dev, "\n");

	if (dev_priv->card_type >= NV_50)
		return 0;

	ret = nouveau_gpuobj_new_fake(dev, dev_priv->ramht_offset, ~0,
				      dev_priv->ramht_size,
				      NVOBJ_FLAG_ZERO_ALLOC, &ramht);
	if (ret)
		return ret;

	ret = nouveau_ramht_new(dev, ramht, &dev_priv->ramht);
	nouveau_gpuobj_ref(NULL, &ramht);
	return ret;
}

void
nouveau_gpuobj_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	NV_DEBUG(dev, "\n");

	nouveau_ramht_ref(NULL, &dev_priv->ramht, NULL);
}

void
nouveau_gpuobj_late_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = NULL;
	struct list_head *entry, *tmp;

	NV_DEBUG(dev, "\n");

	list_for_each_safe(entry, tmp, &dev_priv->gpuobj_list) {
		gpuobj = list_entry(entry, struct nouveau_gpuobj, list);

		NV_ERROR(dev, "gpuobj %p still exists at takedown, refs=%d\n",
			 gpuobj, gpuobj->refcount);

		gpuobj->refcount = 1;
		nouveau_gpuobj_ref(NULL, &gpuobj);
	}
}

static int
nouveau_gpuobj_del(struct nouveau_gpuobj *gpuobj)
{
	struct drm_device *dev = gpuobj->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine = &dev_priv->engine;
	int i;

	NV_DEBUG(dev, "gpuobj %p\n", gpuobj);

	if (gpuobj->im_pramin && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) {
		for (i = 0; i < gpuobj->size; i += 4)
			nv_wo32(gpuobj, i, 0);
		engine->instmem.flush(dev);
	}

	if (gpuobj->dtor)
		gpuobj->dtor(dev, gpuobj);

	if (gpuobj->im_backing)
		engine->instmem.clear(dev, gpuobj);

	if (gpuobj->im_pramin)
		drm_mm_put_block(gpuobj->im_pramin);

	list_del(&gpuobj->list);

	kfree(gpuobj);
	return 0;
}

void
nouveau_gpuobj_ref(struct nouveau_gpuobj *ref, struct nouveau_gpuobj **ptr)
{
	if (ref)
		ref->refcount++;

	if (*ptr && --(*ptr)->refcount == 0)
		nouveau_gpuobj_del(*ptr);

	*ptr = ref;
}

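/* nouveau_gpuobj_ref() follows the usual reference-swap idiom: it takes a
 * reference on "ref" (which may be NULL), drops the reference held through
 * *ptr, and stores "ref" in *ptr.  The same call therefore both shares and
 * releases objects, as seen throughout this file:
 */
#if 0	/* illustrative sketch, not compiled */
	nouveau_gpuobj_ref(obj, &chan->some_ptr);	/* take a ref, publish it */
	nouveau_gpuobj_ref(NULL, &chan->some_ptr);	/* drop it again */
#endif
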
int
nouveau_gpuobj_new_fake(struct drm_device *dev, u32 pinst, u64 vinst,
			u32 size, u32 flags, struct nouveau_gpuobj **pgpuobj)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = NULL;
	int i;

	NV_DEBUG(dev,
		 "pinst=0x%08x vinst=0x%010llx size=0x%08x flags=0x%08x\n",
		 pinst, vinst, size, flags);

	gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
	if (!gpuobj)
		return -ENOMEM;
	NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
	gpuobj->dev = dev;
	gpuobj->flags = flags;
	gpuobj->refcount = 1;
	gpuobj->size = size;
	gpuobj->pinst = pinst;
	gpuobj->cinst = 0xdeadbeef;
	gpuobj->vinst = vinst;

	if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
		for (i = 0; i < gpuobj->size; i += 4)
			nv_wo32(gpuobj, i, 0);
		dev_priv->engine.instmem.flush(dev);
	}

	list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
	*pgpuobj = gpuobj;
	return 0;
}


static uint32_t
nouveau_gpuobj_class_instmem_size(struct drm_device *dev, int class)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	/*XXX: dodgy hack for now */
	if (dev_priv->card_type >= NV_50)
		return 24;
	if (dev_priv->card_type >= NV_40)
		return 32;
	return 16;
}

/*
   DMA objects are used to reference a piece of memory in the
   framebuffer, PCI or AGP address space. Each object is 16 bytes big
   and looks as follows:

   entry[0]
	11:0  class (seems like I can always use 0 here)
	12    page table present?
	13    page entry linear?
	15:14 access: 0 rw, 1 ro, 2 wo
	17:16 target: 0 NV memory, 1 NV memory tiled, 2 PCI, 3 AGP
	31:20 dma adjust (bits 0-11 of the address)
   entry[1]
	dma limit (size of transfer)
   entry[X]
	1     0 readonly, 1 readwrite
	31:12 dma frame address of the page (bits 12-31 of the address)
   entry[N]
	page table terminator: nvidia uses the same value as the first pte,
	rivatv uses 0xffffffff

   Non-linear page tables need a list of frame addresses afterwards;
   the rivatv project has some info on this.

   The method below creates a DMA object in instance RAM and returns a handle
   to it that can be used to set up context objects.
*/
int
nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class,
		       uint64_t offset, uint64_t size, int access,
		       int target, struct nouveau_gpuobj **gpuobj)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
	int ret;

	NV_DEBUG(dev, "ch%d class=0x%04x offset=0x%llx size=0x%llx\n",
		 chan->id, class, offset, size);
	NV_DEBUG(dev, "access=%d target=%d\n", access, target);

	switch (target) {
	case NV_DMA_TARGET_AGP:
		offset += dev_priv->gart_info.aper_base;
		break;
	default:
		break;
	}

	ret = nouveau_gpuobj_new(dev, chan,
				 nouveau_gpuobj_class_instmem_size(dev, class),
				 16, NVOBJ_FLAG_ZERO_ALLOC |
				 NVOBJ_FLAG_ZERO_FREE, gpuobj);
	if (ret) {
		NV_ERROR(dev, "Error creating gpuobj: %d\n", ret);
		return ret;
	}

	if (dev_priv->card_type < NV_50) {
		uint32_t frame, adjust, pte_flags = 0;

		if (access != NV_DMA_ACCESS_RO)
			pte_flags |= (1<<1);
		adjust = offset &  0x00000fff;
		frame  = offset & ~0x00000fff;

		nv_wo32(*gpuobj,  0, ((1<<12) | (1<<13) | (adjust << 20) |
				      (access << 14) | (target << 16) |
				      class));
		nv_wo32(*gpuobj,  4, size - 1);
		nv_wo32(*gpuobj,  8, frame | pte_flags);
		nv_wo32(*gpuobj, 12, frame | pte_flags);
	} else {
		uint64_t limit = offset + size - 1;
		uint32_t flags0, flags5;

		if (target == NV_DMA_TARGET_VIDMEM) {
			flags0 = 0x00190000;
			flags5 = 0x00010000;
		} else {
			flags0 = 0x7fc00000;
			flags5 = 0x00080000;
		}

		nv_wo32(*gpuobj,  0, flags0 | class);
		nv_wo32(*gpuobj,  4, lower_32_bits(limit));
		nv_wo32(*gpuobj,  8, lower_32_bits(offset));
		nv_wo32(*gpuobj, 12, ((upper_32_bits(limit) & 0xff) << 24) |
				      (upper_32_bits(offset) & 0xff));
		nv_wo32(*gpuobj, 20, flags5);
	}

	instmem->flush(dev);

	(*gpuobj)->engine = NVOBJ_ENGINE_SW;
	(*gpuobj)->class  = class;
	return 0;
}

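/* Example use, mirroring the VRAM ctxdma set up in
 * nouveau_gpuobj_channel_init() below: a read-write DMA object covering all
 * of VRAM on pre-NV50 cards.  A sketch; error handling trimmed.
 */
#if 0	/* illustrative sketch, not compiled */
	struct nouveau_gpuobj *vram = NULL;

	ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
				     0, dev_priv->fb_available_size,
				     NV_DMA_ACCESS_RW,
				     NV_DMA_TARGET_VIDMEM, &vram);
#endif
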
int
nouveau_gpuobj_gart_dma_new(struct nouveau_channel *chan,
			    uint64_t offset, uint64_t size, int access,
			    struct nouveau_gpuobj **gpuobj,
			    uint32_t *o_ret)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int ret;

	if (dev_priv->gart_info.type == NOUVEAU_GART_AGP ||
	    (dev_priv->card_type >= NV_50 &&
	     dev_priv->gart_info.type == NOUVEAU_GART_SGDMA)) {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     offset + dev_priv->vm_gart_base,
					     size, access, NV_DMA_TARGET_AGP,
					     gpuobj);
		if (o_ret)
			*o_ret = 0;
	} else
	if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA) {
		nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma, gpuobj);
		if (offset & ~0xffffffffULL) {
			NV_ERROR(dev, "obj offset exceeds 32-bits\n");
			return -EINVAL;
		}
		if (o_ret)
			*o_ret = (uint32_t)offset;
		ret = (*gpuobj != NULL) ? 0 : -EINVAL;
	} else {
		NV_ERROR(dev, "Invalid GART type %d\n", dev_priv->gart_info.type);
		return -EINVAL;
	}

	return ret;
}

/* Context objects in the instance RAM have the following structure.
 * On NV40 they are 32 bytes long, on NV30 and smaller 16 bytes.

   NV4 - NV30:

   entry[0]
	11:0  class
	12    chroma key enable
	13    user clip enable
	14    swizzle enable
	17:15 patch config:
	      scrcopy_and, rop_and, blend_and, scrcopy, srccopy_pre, blend_pre
	18    synchronize enable
	19    endian: 1 big, 0 little
	21:20 dither mode
	23    single step enable
	24    patch status: 0 invalid, 1 valid
	25    context_surface 0: 1 valid
	26    context surface 1: 1 valid
	27    context pattern: 1 valid
	28    context rop: 1 valid
	29,30 context beta, beta4
   entry[1]
	7:0   mono format
	15:8  color format
	31:16 notify instance address
   entry[2]
	15:0  dma 0 instance address
	31:16 dma 1 instance address
   entry[3]
	dma method traps

   NV40:
   No idea what the exact format is. Here's what can be deduced:

   entry[0]:
	11:0  class (maybe uses more bits here?)
	17    user clip enable
	21:19 patch config
	25    patch status valid ?
   entry[1]:
	15:0  DMA notifier (maybe 20:0)
   entry[2]:
	15:0  DMA 0 instance (maybe 20:0)
	24    big endian
   entry[3]:
	15:0  DMA 1 instance (maybe 20:0)
   entry[4]:
   entry[5]:
	set to 0?
*/
int
nouveau_gpuobj_gr_new(struct nouveau_channel *chan, int class,
		      struct nouveau_gpuobj **gpuobj)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int ret;

	NV_DEBUG(dev, "ch%d class=0x%04x\n", chan->id, class);

	ret = nouveau_gpuobj_new(dev, chan,
				 nouveau_gpuobj_class_instmem_size(dev, class),
				 16,
				 NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE,
				 gpuobj);
	if (ret) {
		NV_ERROR(dev, "Error creating gpuobj: %d\n", ret);
		return ret;
	}

	if (dev_priv->card_type >= NV_50) {
		nv_wo32(*gpuobj,  0, class);
		nv_wo32(*gpuobj, 20, 0x00010000);
	} else {
		switch (class) {
		case NV_CLASS_NULL:
			nv_wo32(*gpuobj, 0, 0x00001030);
			nv_wo32(*gpuobj, 4, 0xFFFFFFFF);
			break;
		default:
			if (dev_priv->card_type >= NV_40) {
				nv_wo32(*gpuobj, 0, class);
#ifdef __BIG_ENDIAN
				nv_wo32(*gpuobj, 8, 0x01000000);
#endif
			} else {
#ifdef __BIG_ENDIAN
				nv_wo32(*gpuobj, 0, class | 0x00080000);
#else
				nv_wo32(*gpuobj, 0, class);
#endif
			}
		}
	}
	dev_priv->engine.instmem.flush(dev);

	(*gpuobj)->engine = NVOBJ_ENGINE_GR;
	(*gpuobj)->class  = class;
	return 0;
}

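/* Example use, as in nouveau_ioctl_grobj_alloc() below: create a graphics
 * object of a given class and publish it in the channel's hash table.  A
 * sketch; error handling trimmed, and it assumes RAMHT takes its own
 * reference on insert (the local ref is dropped afterwards).
 */
#if 0	/* illustrative sketch, not compiled */
	struct nouveau_gpuobj *gr = NULL;

	ret = nouveau_gpuobj_gr_new(chan, class, &gr);
	if (ret == 0) {
		ret = nouveau_ramht_insert(chan, handle, gr);
		nouveau_gpuobj_ref(NULL, &gr);	/* RAMHT holds its own ref */
	}
#endif
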
int
nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class,
		      struct nouveau_gpuobj **gpuobj_ret)
{
	struct drm_nouveau_private *dev_priv;
	struct nouveau_gpuobj *gpuobj;

	if (!chan || !gpuobj_ret || *gpuobj_ret != NULL)
		return -EINVAL;
	dev_priv = chan->dev->dev_private;

	gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
	if (!gpuobj)
		return -ENOMEM;
	gpuobj->dev = chan->dev;
	gpuobj->engine = NVOBJ_ENGINE_SW;
	gpuobj->class = class;
	gpuobj->refcount = 1;
	gpuobj->cinst = 0x40;

	list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
	*gpuobj_ret = gpuobj;
	return 0;
}

static int
nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t size;
	uint32_t base;
	int ret;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	/* Base amount for object storage (4KiB enough?) */
	size = 0x1000;
	base = 0;

	/* PGRAPH context */
	size += dev_priv->engine.graph.grctx_size;

	if (dev_priv->card_type == NV_50) {
		/* Various fixed table thingos */
		size += 0x1400; /* mostly unknown stuff */
		size += 0x4000; /* vm pd */
		base  = 0x6000;
		/* RAMHT, not sure about setting size yet, 32KiB to be safe */
		size += 0x8000;
		/* RAMFC */
		size += 0x1000;
	}

	ret = nouveau_gpuobj_new(dev, NULL, size, 0x1000, 0, &chan->ramin);
	if (ret) {
		NV_ERROR(dev, "Error allocating channel PRAMIN: %d\n", ret);
		return ret;
	}

	ret = drm_mm_init(&chan->ramin_heap, base, size);
	if (ret) {
		NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret);
		nouveau_gpuobj_ref(NULL, &chan->ramin);
		return ret;
	}

	return 0;
}

int
nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
			    uint32_t vram_h, uint32_t tt_h)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
	struct nouveau_gpuobj *vram = NULL, *tt = NULL;
	int ret, i;

	NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);

	/* Allocate a chunk of memory for per-channel object storage */
	ret = nouveau_gpuobj_channel_init_pramin(chan);
	if (ret) {
		NV_ERROR(dev, "init pramin\n");
		return ret;
	}

	/* NV50 VM
	 *  - Allocate per-channel page-directory
	 *  - Map GART and VRAM into the channel's address space at the
	 *    locations determined during init.
	 */
	if (dev_priv->card_type >= NV_50) {
		u32 pgd_offs = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200;
		u64 vm_vinst = chan->ramin->vinst + pgd_offs;
		u32 vm_pinst = chan->ramin->pinst;
		u32 pde;

		if (vm_pinst != ~0)
			vm_pinst += pgd_offs;

		ret = nouveau_gpuobj_new_fake(dev, vm_pinst, vm_vinst, 0x4000,
					      0, &chan->vm_pd);
		if (ret)
			return ret;
		for (i = 0; i < 0x4000; i += 8) {
			nv_wo32(chan->vm_pd, i + 0, 0x00000000);
			nv_wo32(chan->vm_pd, i + 4, 0xdeadcafe);
		}

		nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma,
				   &chan->vm_gart_pt);
		pde = (dev_priv->vm_gart_base / (512*1024*1024)) * 8;
		nv_wo32(chan->vm_pd, pde + 0, chan->vm_gart_pt->vinst | 3);
		nv_wo32(chan->vm_pd, pde + 4, 0x00000000);

		pde = (dev_priv->vm_vram_base / (512*1024*1024)) * 8;
		for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) {
			nouveau_gpuobj_ref(dev_priv->vm_vram_pt[i],
					   &chan->vm_vram_pt[i]);

			nv_wo32(chan->vm_pd, pde + 0,
				chan->vm_vram_pt[i]->vinst | 0x61);
			nv_wo32(chan->vm_pd, pde + 4, 0x00000000);
			pde += 8;
		}

		instmem->flush(dev);
	}

	/* RAMHT */
	if (dev_priv->card_type < NV_50) {
		nouveau_ramht_ref(dev_priv->ramht, &chan->ramht, NULL);
	} else {
		struct nouveau_gpuobj *ramht = NULL;

		ret = nouveau_gpuobj_new(dev, chan, 0x8000, 16,
					 NVOBJ_FLAG_ZERO_ALLOC, &ramht);
		if (ret)
			return ret;

		ret = nouveau_ramht_new(dev, ramht, &chan->ramht);
		nouveau_gpuobj_ref(NULL, &ramht);
		if (ret)
			return ret;
	}

	/* VRAM ctxdma */
	if (dev_priv->card_type >= NV_50) {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     0, dev_priv->vm_end,
					     NV_DMA_ACCESS_RW,
					     NV_DMA_TARGET_AGP, &vram);
		if (ret) {
			NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
			return ret;
		}
	} else {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     0, dev_priv->fb_available_size,
					     NV_DMA_ACCESS_RW,
					     NV_DMA_TARGET_VIDMEM, &vram);
		if (ret) {
			NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
			return ret;
		}
	}

	ret = nouveau_ramht_insert(chan, vram_h, vram);
	nouveau_gpuobj_ref(NULL, &vram);
	if (ret) {
		NV_ERROR(dev, "Error adding VRAM ctxdma to RAMHT: %d\n", ret);
		return ret;
	}

	/* TT memory ctxdma */
	if (dev_priv->card_type >= NV_50) {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     0, dev_priv->vm_end,
					     NV_DMA_ACCESS_RW,
					     NV_DMA_TARGET_AGP, &tt);
		if (ret) {
			NV_ERROR(dev, "Error creating TT ctxdma: %d\n", ret);
			return ret;
		}
	} else
	if (dev_priv->gart_info.type != NOUVEAU_GART_NONE) {
		ret = nouveau_gpuobj_gart_dma_new(chan, 0,
						  dev_priv->gart_info.aper_size,
						  NV_DMA_ACCESS_RW, &tt, NULL);
	} else {
		NV_ERROR(dev, "Invalid GART type %d\n", dev_priv->gart_info.type);
		ret = -EINVAL;
	}

	if (ret) {
		NV_ERROR(dev, "Error creating TT ctxdma: %d\n", ret);
		return ret;
	}

	ret = nouveau_ramht_insert(chan, tt_h, tt);
	nouveau_gpuobj_ref(NULL, &tt);
	if (ret) {
		NV_ERROR(dev, "Error adding TT ctxdma to RAMHT: %d\n", ret);
		return ret;
	}

	return 0;
}

void
nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct drm_device *dev = chan->dev;
	int i;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	if (!chan->ramht)
		return;

	nouveau_ramht_ref(NULL, &chan->ramht, chan);

	nouveau_gpuobj_ref(NULL, &chan->vm_pd);
	nouveau_gpuobj_ref(NULL, &chan->vm_gart_pt);
	for (i = 0; i < dev_priv->vm_vram_pt_nr; i++)
		nouveau_gpuobj_ref(NULL, &chan->vm_vram_pt[i]);

	if (chan->ramin_heap.free_stack.next)
		drm_mm_takedown(&chan->ramin_heap);
	nouveau_gpuobj_ref(NULL, &chan->ramin);
}

int
nouveau_gpuobj_suspend(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj;
	int i;

	if (dev_priv->card_type < NV_50) {
		dev_priv->susres.ramin_copy = vmalloc(dev_priv->ramin_rsvd_vram);
		if (!dev_priv->susres.ramin_copy)
			return -ENOMEM;

		for (i = 0; i < dev_priv->ramin_rsvd_vram; i += 4)
			dev_priv->susres.ramin_copy[i/4] = nv_ri32(dev, i);
		return 0;
	}

	list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
		if (!gpuobj->im_backing)
			continue;

		gpuobj->im_backing_suspend = vmalloc(gpuobj->size);
		if (!gpuobj->im_backing_suspend) {
			nouveau_gpuobj_resume(dev);
			return -ENOMEM;
		}

		for (i = 0; i < gpuobj->size; i += 4)
			gpuobj->im_backing_suspend[i/4] = nv_ro32(gpuobj, i);
	}

	return 0;
}

void
nouveau_gpuobj_suspend_cleanup(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj;

	if (dev_priv->card_type < NV_50) {
		vfree(dev_priv->susres.ramin_copy);
		dev_priv->susres.ramin_copy = NULL;
		return;
	}

	list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
		if (!gpuobj->im_backing_suspend)
			continue;

		vfree(gpuobj->im_backing_suspend);
		gpuobj->im_backing_suspend = NULL;
	}
}

void
nouveau_gpuobj_resume(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj;
	int i;

	if (dev_priv->card_type < NV_50) {
		for (i = 0; i < dev_priv->ramin_rsvd_vram; i += 4)
			nv_wi32(dev, i, dev_priv->susres.ramin_copy[i/4]);
		nouveau_gpuobj_suspend_cleanup(dev);
		return;
	}

	list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
		if (!gpuobj->im_backing_suspend)
			continue;

		for (i = 0; i < gpuobj->size; i += 4)
			nv_wo32(gpuobj, i, gpuobj->im_backing_suspend[i/4]);
		dev_priv->engine.instmem.flush(dev);
	}

	nouveau_gpuobj_suspend_cleanup(dev);
}

int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_nouveau_grobj_alloc *init = data;
	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
	struct nouveau_pgraph_object_class *grc;
	struct nouveau_gpuobj *gr = NULL;
	struct nouveau_channel *chan;
	int ret;

	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(init->channel, file_priv, chan);

	if (init->handle == ~0)
		return -EINVAL;

	grc = pgraph->grclass;
	while (grc->id) {
		if (grc->id == init->class)
			break;
		grc++;
	}

	if (!grc->id) {
		NV_ERROR(dev, "Illegal object class: 0x%x\n", init->class);
		return -EPERM;
	}

	if (nouveau_ramht_find(chan, init->handle))
		return -EEXIST;

	if (!grc->software)
		ret = nouveau_gpuobj_gr_new(chan, grc->id, &gr);
	else
		ret = nouveau_gpuobj_sw_new(chan, grc->id, &gr);
	if (ret) {
		NV_ERROR(dev, "Error creating object: %d (%d/0x%08x)\n",
			 ret, init->channel, init->handle);
		return ret;
	}

	ret = nouveau_ramht_insert(chan, init->handle, gr);
	nouveau_gpuobj_ref(NULL, &gr);
	if (ret) {
		NV_ERROR(dev, "Error referencing object: %d (%d/0x%08x)\n",
			 ret, init->channel, init->handle);
		return ret;
	}

	return 0;
}

int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_nouveau_gpuobj_free *objfree = data;
	struct nouveau_gpuobj *gpuobj;
	struct nouveau_channel *chan;

	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(objfree->channel, file_priv, chan);

	gpuobj = nouveau_ramht_find(chan, objfree->handle);
	if (!gpuobj)
		return -ENOENT;

	nouveau_ramht_remove(chan, objfree->handle);
	return 0;
}

u32
nv_ro32(struct nouveau_gpuobj *gpuobj, u32 offset)
{
	struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
	struct drm_device *dev = gpuobj->dev;

	if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) {
		u64 ptr = gpuobj->vinst + offset;
		u32 base = ptr >> 16;
		u32 val;

		spin_lock(&dev_priv->ramin_lock);
		if (dev_priv->ramin_base != base) {
			dev_priv->ramin_base = base;
			nv_wr32(dev, 0x001700, dev_priv->ramin_base);
		}
		val = nv_rd32(dev, 0x700000 + (ptr & 0xffff));
		spin_unlock(&dev_priv->ramin_lock);
		return val;
	}

	return nv_ri32(dev, gpuobj->pinst + offset);
}

void
nv_wo32(struct nouveau_gpuobj *gpuobj, u32 offset, u32 val)
{
	struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
	struct drm_device *dev = gpuobj->dev;

	if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) {
		u64 ptr = gpuobj->vinst + offset;
		u32 base = ptr >> 16;

		spin_lock(&dev_priv->ramin_lock);
		if (dev_priv->ramin_base != base) {
			dev_priv->ramin_base = base;
			nv_wr32(dev, 0x001700, dev_priv->ramin_base);
		}
		nv_wr32(dev, 0x700000 + (ptr & 0xffff), val);
		spin_unlock(&dev_priv->ramin_lock);
		return;
	}

	nv_wi32(dev, gpuobj->pinst + offset, val);
}
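
/* Both accessors fall back to a 64KiB sliding window when an object has no
 * PRAMIN aperture mapping (pinst == ~0): register 0x1700 selects which
 * 64KiB-aligned chunk of VRAM appears at BAR offset 0x700000.  A worked
 * example, assuming vinst + offset = 0x12345678:
 *
 *   base = 0x12345678 >> 16  = 0x1234    (written to register 0x001700)
 *   reg  = 0x700000 + 0x5678 = 0x705678  (MMIO address actually accessed)
 *
 * The cached ramin_base avoids rewriting 0x1700 for consecutive accesses
 * that land in the same window.
 */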