/*
 * Copyright (C) 2006 Ben Skeggs.
 *
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

/*
 * Authors:
 *   Ben Skeggs <darktama@iinet.net.au>
 */

#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nouveau_ramht.h"

/* NVIDIA uses context objects to drive drawing operations.

   Context objects can be selected into 8 subchannels in the FIFO,
   and then used via DMA command buffers.

   A context object is referenced by a user-defined handle (CARD32).  The HW
   looks up graphics objects in a hash table in the instance RAM.

   An entry in the hash table consists of two CARD32s.  The first CARD32
   contains the handle, the second one is a bitfield that contains the
   address of the object in instance RAM.

   The format of the second CARD32 seems to be:

   NV4 to NV30:

   15: 0 instance_addr >> 4
   17:16 engine (here uses 1 = graphics)
   28:24 channel id (here uses 0)
   31    valid (use 1)

   NV40:

   15: 0 instance_addr >> 4   (maybe 19-0)
   21:20 engine (here uses 1 = graphics)
   I'm unsure about the other bits, but using 0 seems to work.

   The key into the hash table depends on the object handle and channel id;
   a sketch of the hashing scheme follows below.
*/

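/* Sketch of the hash-key computation, for illustration only; the
   authoritative implementation lives in nouveau_ramht.c.  "bits" stands for
   log2 of the number of hash-table entries:

	hash = 0;
	while (handle) {
		hash ^= handle & ((1 << bits) - 1);
		handle >>= bits;
	}
	hash ^= chan_id << (bits - 4);    (pre-NV50; NV50 uses per-channel RAMHT)
	key   = hash << 3;                (byte offset of the entry in RAMHT)
*/
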
int
nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
		   uint32_t size, int align, uint32_t flags,
		   struct nouveau_gpuobj **gpuobj_ret)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine = &dev_priv->engine;
	struct nouveau_gpuobj *gpuobj;
	struct drm_mm_node *ramin = NULL;
	int ret;

	NV_DEBUG(dev, "ch%d size=%u align=%d flags=0x%08x\n",
		 chan ? chan->id : -1, size, align, flags);

	if (!dev_priv || !gpuobj_ret || *gpuobj_ret != NULL)
		return -EINVAL;

	gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
	if (!gpuobj)
		return -ENOMEM;
	NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
	gpuobj->dev = dev;
	gpuobj->flags = flags;
	gpuobj->refcount = 1;
	gpuobj->size = size;

	list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);

	if (chan) {
		NV_DEBUG(dev, "channel heap\n");

		ramin = drm_mm_search_free(&chan->ramin_heap, size, align, 0);
		if (ramin)
			ramin = drm_mm_get_block(ramin, size, align);

		if (!ramin) {
			nouveau_gpuobj_ref(NULL, &gpuobj);
			return -ENOMEM;
		}
	} else {
		NV_DEBUG(dev, "global heap\n");

		/* allocate backing pages, sets vinst */
		ret = engine->instmem.populate(dev, gpuobj, &size);
		if (ret) {
			nouveau_gpuobj_ref(NULL, &gpuobj);
			return ret;
		}

		/* try and get aperture space */
		ramin = drm_mm_search_free(&dev_priv->ramin_heap, size, align, 0);
		if (ramin)
			ramin = drm_mm_get_block(ramin, size, align);

		/* on nv50 it's ok to fail, we have a fallback path */
		if (!ramin && dev_priv->card_type < NV_50) {
			nouveau_gpuobj_ref(NULL, &gpuobj);
			return -ENOMEM;
		}
	}

	/* if we got a chunk of the aperture, map pages into it */
	gpuobj->im_pramin = ramin;
	if (!chan && gpuobj->im_pramin && dev_priv->ramin_available) {
		ret = engine->instmem.bind(dev, gpuobj);
		if (ret) {
			nouveau_gpuobj_ref(NULL, &gpuobj);
			return ret;
		}
	}

	/* calculate the various different addresses for the object */
	if (chan) {
		gpuobj->pinst = chan->ramin->pinst;
		if (gpuobj->pinst != ~0)
			gpuobj->pinst += gpuobj->im_pramin->start;

		if (dev_priv->card_type < NV_50) {
			gpuobj->cinst = gpuobj->pinst;
		} else {
			gpuobj->cinst = gpuobj->im_pramin->start;
			gpuobj->vinst = gpuobj->im_pramin->start +
					chan->ramin->vinst;
		}
	} else {
		if (gpuobj->im_pramin)
			gpuobj->pinst = gpuobj->im_pramin->start;
		else
			gpuobj->pinst = ~0;
		gpuobj->cinst = 0xdeadbeef;
	}

	if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
		int i;

		for (i = 0; i < gpuobj->size; i += 4)
			nv_wo32(gpuobj, i, 0);
		engine->instmem.flush(dev);
	}

	*gpuobj_ret = gpuobj;
	return 0;
}

int
nouveau_gpuobj_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	NV_DEBUG(dev, "\n");

	INIT_LIST_HEAD(&dev_priv->gpuobj_list);
	spin_lock_init(&dev_priv->ramin_lock);
	dev_priv->ramin_base = ~0;

	return 0;
}

void
nouveau_gpuobj_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	NV_DEBUG(dev, "\n");

	nouveau_ramht_ref(NULL, &dev_priv->ramht, NULL);
}

void
nouveau_gpuobj_late_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = NULL;
	struct list_head *entry, *tmp;

	NV_DEBUG(dev, "\n");

	list_for_each_safe(entry, tmp, &dev_priv->gpuobj_list) {
		gpuobj = list_entry(entry, struct nouveau_gpuobj, list);

		NV_ERROR(dev, "gpuobj %p still exists at takedown, refs=%d\n",
			 gpuobj, gpuobj->refcount);

		gpuobj->refcount = 1;
		nouveau_gpuobj_ref(NULL, &gpuobj);
	}
}

static int
nouveau_gpuobj_del(struct nouveau_gpuobj *gpuobj)
{
	struct drm_device *dev = gpuobj->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine = &dev_priv->engine;
	int i;

	NV_DEBUG(dev, "gpuobj %p\n", gpuobj);

	if (gpuobj->im_pramin && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) {
		for (i = 0; i < gpuobj->size; i += 4)
			nv_wo32(gpuobj, i, 0);
		engine->instmem.flush(dev);
	}

	if (gpuobj->dtor)
		gpuobj->dtor(dev, gpuobj);

	if (gpuobj->im_backing)
		engine->instmem.clear(dev, gpuobj);

	if (gpuobj->im_pramin)
		drm_mm_put_block(gpuobj->im_pramin);

	list_del(&gpuobj->list);

	kfree(gpuobj);
	return 0;
}

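/* Update a gpuobj reference: take a reference on 'ref' (if non-NULL), drop
 * the reference previously held through '*ptr' (freeing the old object when
 * its count hits zero), and leave '*ptr' pointing at 'ref'.  Passing
 * ref == NULL simply releases '*ptr'.
 */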
void
nouveau_gpuobj_ref(struct nouveau_gpuobj *ref, struct nouveau_gpuobj **ptr)
{
	if (ref)
		ref->refcount++;

	if (*ptr && --(*ptr)->refcount == 0)
		nouveau_gpuobj_del(*ptr);

	*ptr = ref;
}

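/* Wrap an already-placed piece of instance memory (described by its PRAMIN
 * byte offset 'pinst' and VRAM address 'vinst') in a gpuobj without
 * allocating anything; used for regions whose location is fixed elsewhere,
 * such as the per-channel page directory set up in
 * nouveau_gpuobj_channel_init() below.
 */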
int
nouveau_gpuobj_new_fake(struct drm_device *dev, u32 pinst, u64 vinst,
			u32 size, u32 flags, struct nouveau_gpuobj **pgpuobj)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = NULL;
	int i;

	NV_DEBUG(dev,
		 "pinst=0x%08x vinst=0x%010llx size=0x%08x flags=0x%08x\n",
		 pinst, vinst, size, flags);

	gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
	if (!gpuobj)
		return -ENOMEM;
	NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
	gpuobj->dev = dev;
	gpuobj->flags = flags;
	gpuobj->refcount = 1;
	gpuobj->size  = size;
	gpuobj->pinst = pinst;
	gpuobj->cinst = 0xdeadbeef;
	gpuobj->vinst = vinst;

	if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
		for (i = 0; i < gpuobj->size; i += 4)
			nv_wo32(gpuobj, i, 0);
		dev_priv->engine.instmem.flush(dev);
	}

	list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
	*pgpuobj = gpuobj;
	return 0;
}

static uint32_t
nouveau_gpuobj_class_instmem_size(struct drm_device *dev, int class)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	/*XXX: dodgy hack for now */
	if (dev_priv->card_type >= NV_50)
		return 24;
	if (dev_priv->card_type >= NV_40)
		return 32;
	return 16;
}

/*
   DMA objects are used to reference a piece of memory in the
   framebuffer, PCI or AGP address space.  Each object is 16 bytes in
   size and looks as follows:

   entry[0]
   11:0  class (seems like I can always use 0 here)
   12    page table present?
   13    page entry linear?
   15:14 access: 0 rw, 1 ro, 2 wo
   17:16 target: 0 NV memory, 1 NV memory tiled, 2 PCI, 3 AGP
   31:20 dma adjust (bits 0-11 of the address)
   entry[1]
   dma limit (size of transfer)
   entry[X]
   1     0 readonly, 1 readwrite
   31:12 dma frame address of the page (bits 12-31 of the address)
   entry[N]
   page table terminator; NVIDIA uses the same value as the first pte,
   rivatv uses 0xffffffff

   Non-linear page tables need a list of frame addresses afterwards;
   the rivatv project has some info on this.

   The method below creates a DMA object in instance RAM and returns a handle
   to it that can be used to set up context objects.  A worked example of the
   layout follows this comment.
*/
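/* Worked example of the layout above (illustrative only): a read/write DMA
   object describing a 4KiB buffer at offset 0x12345678 (after any AGP base
   adjustment) ends up, via nouveau_gpuobj_dma_new() below, roughly as

	adjust   = 0x678, frame = 0x12345000
	entry[0] = (1 << 12) | (1 << 13) | (adjust << 20) |
		   (access << 14) | (target << 16) | class
	entry[1] = size - 1 = 0xfff
	entry[2] = entry[3] = frame | (1 << 1)    (bit 1 set: page writable)

   with 'access', 'target' and 'class' as passed to the function.
*/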
int
nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class,
		       uint64_t offset, uint64_t size, int access,
		       int target, struct nouveau_gpuobj **gpuobj)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
	int ret;

	NV_DEBUG(dev, "ch%d class=0x%04x offset=0x%llx size=0x%llx\n",
		 chan->id, class, offset, size);
	NV_DEBUG(dev, "access=%d target=%d\n", access, target);

	switch (target) {
	case NV_DMA_TARGET_AGP:
		offset += dev_priv->gart_info.aper_base;
		break;
	default:
		break;
	}

	ret = nouveau_gpuobj_new(dev, chan,
				 nouveau_gpuobj_class_instmem_size(dev, class),
				 16, NVOBJ_FLAG_ZERO_ALLOC |
				 NVOBJ_FLAG_ZERO_FREE, gpuobj);
	if (ret) {
		NV_ERROR(dev, "Error creating gpuobj: %d\n", ret);
		return ret;
	}

	if (dev_priv->card_type < NV_50) {
		uint32_t frame, adjust, pte_flags = 0;

		if (access != NV_DMA_ACCESS_RO)
			pte_flags |= (1<<1);
		adjust = offset &  0x00000fff;
		frame  = offset & ~0x00000fff;

		nv_wo32(*gpuobj,  0, ((1<<12) | (1<<13) | (adjust << 20) |
				      (access << 14) | (target << 16) |
				      class));
		nv_wo32(*gpuobj,  4, size - 1);
		nv_wo32(*gpuobj,  8, frame | pte_flags);
		nv_wo32(*gpuobj, 12, frame | pte_flags);
	} else {
		uint64_t limit = offset + size - 1;
		uint32_t flags0, flags5;

		if (target == NV_DMA_TARGET_VIDMEM) {
			flags0 = 0x00190000;
			flags5 = 0x00010000;
		} else {
			flags0 = 0x7fc00000;
			flags5 = 0x00080000;
		}

		nv_wo32(*gpuobj,  0, flags0 | class);
		nv_wo32(*gpuobj,  4, lower_32_bits(limit));
		nv_wo32(*gpuobj,  8, lower_32_bits(offset));
		nv_wo32(*gpuobj, 12, ((upper_32_bits(limit) & 0xff) << 24) |
				      (upper_32_bits(offset) & 0xff));
		nv_wo32(*gpuobj, 20, flags5);
	}

	instmem->flush(dev);

	(*gpuobj)->engine = NVOBJ_ENGINE_SW;
	(*gpuobj)->class  = class;
	return 0;
}

int
nouveau_gpuobj_gart_dma_new(struct nouveau_channel *chan,
			    uint64_t offset, uint64_t size, int access,
			    struct nouveau_gpuobj **gpuobj,
			    uint32_t *o_ret)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int ret;

	if (dev_priv->gart_info.type == NOUVEAU_GART_AGP ||
	    (dev_priv->card_type >= NV_50 &&
	     dev_priv->gart_info.type == NOUVEAU_GART_SGDMA)) {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     offset + dev_priv->vm_gart_base,
					     size, access, NV_DMA_TARGET_AGP,
					     gpuobj);
		if (o_ret)
			*o_ret = 0;
	} else
	if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA) {
		nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma, gpuobj);
		if (offset & ~0xffffffffULL) {
			NV_ERROR(dev, "obj offset exceeds 32-bits\n");
			return -EINVAL;
		}
		if (o_ret)
			*o_ret = (uint32_t)offset;
		ret = (*gpuobj != NULL) ? 0 : -EINVAL;
	} else {
		NV_ERROR(dev, "Invalid GART type %d\n", dev_priv->gart_info.type);
		return -EINVAL;
	}

	return ret;
}

/* Context objects in the instance RAM have the following structure.
 * On NV40 they are 32 bytes long, on NV30 and earlier 16 bytes.

   NV4 - NV30:

   entry[0]
   11:0 class
   12   chroma key enable
   13   user clip enable
   14   swizzle enable
   17:15 patch config:
       scrcopy_and, rop_and, blend_and, scrcopy, srccopy_pre, blend_pre
   18   synchronize enable
   19   endian: 1 big, 0 little
   21:20 dither mode
   23    single step enable
   24    patch status: 0 invalid, 1 valid
   25    context surface 0: 1 valid
   26    context surface 1: 1 valid
   27    context pattern: 1 valid
   28    context rop: 1 valid
   29,30 context beta, beta4
   entry[1]
   7:0   mono format
   15:8  color format
   31:16 notify instance address
   entry[2]
   15:0  dma 0 instance address
   31:16 dma 1 instance address
   entry[3]
   dma method traps

   NV40:
   No idea what the exact format is.  Here's what can be deduced:

   entry[0]:
   11:0  class  (maybe uses more bits here?)
   17    user clip enable
   21:19 patch config
   25    patch status valid ?
   entry[1]:
   15:0  DMA notifier  (maybe 20:0)
   entry[2]:
   15:0  DMA 0 instance (maybe 20:0)
   24    big endian
   entry[3]:
   15:0  DMA 1 instance (maybe 20:0)
   entry[4]:
   entry[5]:
   set to 0?

   An illustrative packing of the NV4-NV30 entry[0] follows below.
*/
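/* Purely as an illustration of the NV4-NV30 layout above, entry[0] could be
   assembled from its fields like this (a sketch; the code below just writes
   a handful of fixed patterns instead):

	entry[0] = class |
		   (chroma_key_en  << 12) | (user_clip_en << 13) |
		   (swizzle_en     << 14) | (patch_config << 15) |
		   (synchronize_en << 18) | (big_endian   << 19) |
		   (dither_mode    << 20);
*/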
int
nouveau_gpuobj_gr_new(struct nouveau_channel *chan, int class,
		      struct nouveau_gpuobj **gpuobj)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int ret;

	NV_DEBUG(dev, "ch%d class=0x%04x\n", chan->id, class);

	ret = nouveau_gpuobj_new(dev, chan,
				 nouveau_gpuobj_class_instmem_size(dev, class),
				 16,
				 NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE,
				 gpuobj);
	if (ret) {
		NV_ERROR(dev, "Error creating gpuobj: %d\n", ret);
		return ret;
	}

	if (dev_priv->card_type >= NV_50) {
		nv_wo32(*gpuobj,  0, class);
		nv_wo32(*gpuobj, 20, 0x00010000);
	} else {
		switch (class) {
		case NV_CLASS_NULL:
			nv_wo32(*gpuobj, 0, 0x00001030);
			nv_wo32(*gpuobj, 4, 0xFFFFFFFF);
			break;
		default:
			if (dev_priv->card_type >= NV_40) {
				nv_wo32(*gpuobj, 0, class);
#ifdef __BIG_ENDIAN
				nv_wo32(*gpuobj, 8, 0x01000000);
#endif
			} else {
#ifdef __BIG_ENDIAN
				nv_wo32(*gpuobj, 0, class | 0x00080000);
#else
				nv_wo32(*gpuobj, 0, class);
#endif
			}
		}
	}
	dev_priv->engine.instmem.flush(dev);

	(*gpuobj)->engine = NVOBJ_ENGINE_GR;
	(*gpuobj)->class  = class;
	return 0;
}

int
nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class,
		      struct nouveau_gpuobj **gpuobj_ret)
{
	struct drm_nouveau_private *dev_priv;
	struct nouveau_gpuobj *gpuobj;

	if (!chan || !gpuobj_ret || *gpuobj_ret != NULL)
		return -EINVAL;
	dev_priv = chan->dev->dev_private;

	gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
	if (!gpuobj)
		return -ENOMEM;
	gpuobj->dev = chan->dev;
	gpuobj->engine = NVOBJ_ENGINE_SW;
	gpuobj->class = class;
	gpuobj->refcount = 1;
	gpuobj->cinst = 0x40;

	list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
	*gpuobj_ret = gpuobj;
	return 0;
}

static int
nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t size;
	uint32_t base;
	int ret;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	/* Base amount for object storage (4KiB enough?) */
	size = 0x1000;
	base = 0;

	/* PGRAPH context */
	size += dev_priv->engine.graph.grctx_size;

	if (dev_priv->card_type == NV_50) {
		/* Various fixed table thingos */
		size += 0x1400; /* mostly unknown stuff */
		size += 0x4000; /* vm pd */
		base  = 0x6000;
		/* RAMHT, not sure about setting size yet, 32KiB to be safe */
		size += 0x8000;
		/* RAMFC */
		size += 0x1000;
	}

	ret = nouveau_gpuobj_new(dev, NULL, size, 0x1000, 0, &chan->ramin);
	if (ret) {
		NV_ERROR(dev, "Error allocating channel PRAMIN: %d\n", ret);
		return ret;
	}

	ret = drm_mm_init(&chan->ramin_heap, base, size);
	if (ret) {
		NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret);
		nouveau_gpuobj_ref(NULL, &chan->ramin);
		return ret;
	}

	return 0;
}

int
nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
			    uint32_t vram_h, uint32_t tt_h)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
	struct nouveau_gpuobj *vram = NULL, *tt = NULL;
	int ret, i;

	NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);

	/* Allocate a chunk of memory for per-channel object storage */
	ret = nouveau_gpuobj_channel_init_pramin(chan);
	if (ret) {
		NV_ERROR(dev, "init pramin\n");
		return ret;
	}

	/* NV50 VM
	 *  - Allocate per-channel page-directory
	 *  - Map GART and VRAM into the channel's address space at the
	 *    locations determined during init.
	 */
	if (dev_priv->card_type >= NV_50) {
		u32 pgd_offs = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200;
		u64 vm_vinst = chan->ramin->vinst + pgd_offs;
		u32 vm_pinst = chan->ramin->pinst;
		u32 pde;

		if (vm_pinst != ~0)
			vm_pinst += pgd_offs;

		ret = nouveau_gpuobj_new_fake(dev, vm_pinst, vm_vinst, 0x4000,
					      0, &chan->vm_pd);
		if (ret)
			return ret;
		for (i = 0; i < 0x4000; i += 8) {
			nv_wo32(chan->vm_pd, i + 0, 0x00000000);
			nv_wo32(chan->vm_pd, i + 4, 0xdeadcafe);
		}

		nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma,
				   &chan->vm_gart_pt);
		pde = (dev_priv->vm_gart_base / (512*1024*1024)) * 8;
		nv_wo32(chan->vm_pd, pde + 0, chan->vm_gart_pt->vinst | 3);
		nv_wo32(chan->vm_pd, pde + 4, 0x00000000);

		pde = (dev_priv->vm_vram_base / (512*1024*1024)) * 8;
		for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) {
			nouveau_gpuobj_ref(dev_priv->vm_vram_pt[i],
					   &chan->vm_vram_pt[i]);

			nv_wo32(chan->vm_pd, pde + 0,
				chan->vm_vram_pt[i]->vinst | 0x61);
			nv_wo32(chan->vm_pd, pde + 4, 0x00000000);
			pde += 8;
		}

		instmem->flush(dev);
	}

	/* RAMHT */
	if (dev_priv->card_type < NV_50) {
		nouveau_ramht_ref(dev_priv->ramht, &chan->ramht, NULL);
	} else {
		struct nouveau_gpuobj *ramht = NULL;

		ret = nouveau_gpuobj_new(dev, chan, 0x8000, 16,
					 NVOBJ_FLAG_ZERO_ALLOC, &ramht);
		if (ret)
			return ret;

		ret = nouveau_ramht_new(dev, ramht, &chan->ramht);
		nouveau_gpuobj_ref(NULL, &ramht);
		if (ret)
			return ret;
	}

	/* VRAM ctxdma */
	if (dev_priv->card_type >= NV_50) {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     0, dev_priv->vm_end,
					     NV_DMA_ACCESS_RW,
					     NV_DMA_TARGET_AGP, &vram);
		if (ret) {
			NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
			return ret;
		}
	} else {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     0, dev_priv->fb_available_size,
					     NV_DMA_ACCESS_RW,
					     NV_DMA_TARGET_VIDMEM, &vram);
		if (ret) {
			NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
			return ret;
		}
	}

	ret = nouveau_ramht_insert(chan, vram_h, vram);
	nouveau_gpuobj_ref(NULL, &vram);
	if (ret) {
		NV_ERROR(dev, "Error adding VRAM ctxdma to RAMHT: %d\n", ret);
		return ret;
	}

	/* TT memory ctxdma */
	if (dev_priv->card_type >= NV_50) {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     0, dev_priv->vm_end,
					     NV_DMA_ACCESS_RW,
					     NV_DMA_TARGET_AGP, &tt);
		if (ret) {
			NV_ERROR(dev, "Error creating TT ctxdma: %d\n", ret);
			return ret;
		}
	} else
	if (dev_priv->gart_info.type != NOUVEAU_GART_NONE) {
		ret = nouveau_gpuobj_gart_dma_new(chan, 0,
						  dev_priv->gart_info.aper_size,
						  NV_DMA_ACCESS_RW, &tt, NULL);
	} else {
		NV_ERROR(dev, "Invalid GART type %d\n", dev_priv->gart_info.type);
		ret = -EINVAL;
	}

	if (ret) {
		NV_ERROR(dev, "Error creating TT ctxdma: %d\n", ret);
		return ret;
	}

	ret = nouveau_ramht_insert(chan, tt_h, tt);
	nouveau_gpuobj_ref(NULL, &tt);
	if (ret) {
		NV_ERROR(dev, "Error adding TT ctxdma to RAMHT: %d\n", ret);
		return ret;
	}

	return 0;
}

void
nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct drm_device *dev = chan->dev;
	int i;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	if (!chan->ramht)
		return;

	nouveau_ramht_ref(NULL, &chan->ramht, chan);

	nouveau_gpuobj_ref(NULL, &chan->vm_pd);
	nouveau_gpuobj_ref(NULL, &chan->vm_gart_pt);
	for (i = 0; i < dev_priv->vm_vram_pt_nr; i++)
		nouveau_gpuobj_ref(NULL, &chan->vm_vram_pt[i]);

	if (chan->ramin_heap.free_stack.next)
		drm_mm_takedown(&chan->ramin_heap);
	nouveau_gpuobj_ref(NULL, &chan->ramin);
}

int
nouveau_gpuobj_suspend(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj;
	int i;

	if (dev_priv->card_type < NV_50) {
		dev_priv->susres.ramin_copy = vmalloc(dev_priv->ramin_rsvd_vram);
		if (!dev_priv->susres.ramin_copy)
			return -ENOMEM;

		for (i = 0; i < dev_priv->ramin_rsvd_vram; i += 4)
			dev_priv->susres.ramin_copy[i/4] = nv_ri32(dev, i);
		return 0;
	}

	list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
		if (!gpuobj->im_backing)
			continue;

		gpuobj->im_backing_suspend = vmalloc(gpuobj->size);
		if (!gpuobj->im_backing_suspend) {
			nouveau_gpuobj_resume(dev);
			return -ENOMEM;
		}

		for (i = 0; i < gpuobj->size; i += 4)
			gpuobj->im_backing_suspend[i/4] = nv_ro32(gpuobj, i);
	}

	return 0;
}

void
nouveau_gpuobj_suspend_cleanup(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj;

	if (dev_priv->card_type < NV_50) {
		vfree(dev_priv->susres.ramin_copy);
		dev_priv->susres.ramin_copy = NULL;
		return;
	}

	list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
		if (!gpuobj->im_backing_suspend)
			continue;

		vfree(gpuobj->im_backing_suspend);
		gpuobj->im_backing_suspend = NULL;
	}
}

void
nouveau_gpuobj_resume(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj;
	int i;

	if (dev_priv->card_type < NV_50) {
		for (i = 0; i < dev_priv->ramin_rsvd_vram; i += 4)
			nv_wi32(dev, i, dev_priv->susres.ramin_copy[i/4]);
		nouveau_gpuobj_suspend_cleanup(dev);
		return;
	}

	list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
		if (!gpuobj->im_backing_suspend)
			continue;

		for (i = 0; i < gpuobj->size; i += 4)
			nv_wo32(gpuobj, i, gpuobj->im_backing_suspend[i/4]);
		dev_priv->engine.instmem.flush(dev);
	}

	nouveau_gpuobj_suspend_cleanup(dev);
}

int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_nouveau_grobj_alloc *init = data;
	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
	struct nouveau_pgraph_object_class *grc;
	struct nouveau_gpuobj *gr = NULL;
	struct nouveau_channel *chan;
	int ret;

	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(init->channel, file_priv, chan);

	if (init->handle == ~0)
		return -EINVAL;

	grc = pgraph->grclass;
	while (grc->id) {
		if (grc->id == init->class)
			break;
		grc++;
	}

	if (!grc->id) {
		NV_ERROR(dev, "Illegal object class: 0x%x\n", init->class);
		return -EPERM;
	}

	if (nouveau_ramht_find(chan, init->handle))
		return -EEXIST;

	if (!grc->software)
		ret = nouveau_gpuobj_gr_new(chan, grc->id, &gr);
	else
		ret = nouveau_gpuobj_sw_new(chan, grc->id, &gr);
	if (ret) {
		NV_ERROR(dev, "Error creating object: %d (%d/0x%08x)\n",
			 ret, init->channel, init->handle);
		return ret;
	}

	ret = nouveau_ramht_insert(chan, init->handle, gr);
	nouveau_gpuobj_ref(NULL, &gr);
	if (ret) {
		NV_ERROR(dev, "Error referencing object: %d (%d/0x%08x)\n",
			 ret, init->channel, init->handle);
		return ret;
	}

	return 0;
}

int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_nouveau_gpuobj_free *objfree = data;
	struct nouveau_gpuobj *gpuobj;
	struct nouveau_channel *chan;

	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(objfree->channel, file_priv, chan);

	gpuobj = nouveau_ramht_find(chan, objfree->handle);
	if (!gpuobj)
		return -ENOENT;

	nouveau_ramht_remove(chan, objfree->handle);
	return 0;
}

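/* Indirect instance-memory accessors.  If the object has no usable PRAMIN
 * mapping (pinst == ~0, or PRAMIN is not available yet), the object is
 * reached through a sliding window instead: the upper bits of its VRAM
 * address (vinst + offset) are programmed into register 0x001700 and the
 * word is then read/written at 0x700000 plus the low 16 bits, under
 * ramin_lock.  Otherwise the access goes straight to PRAMIN via
 * nv_ri32()/nv_wi32().
 */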
u32
nv_ro32(struct nouveau_gpuobj *gpuobj, u32 offset)
{
	struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
	struct drm_device *dev = gpuobj->dev;

	if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) {
		u64 ptr = gpuobj->vinst + offset;
		u32 base = ptr >> 16;
		u32 val;

		spin_lock(&dev_priv->ramin_lock);
		if (dev_priv->ramin_base != base) {
			dev_priv->ramin_base = base;
			nv_wr32(dev, 0x001700, dev_priv->ramin_base);
		}
		val = nv_rd32(dev, 0x700000 + (ptr & 0xffff));
		spin_unlock(&dev_priv->ramin_lock);
		return val;
	}

	return nv_ri32(dev, gpuobj->pinst + offset);
}

void
nv_wo32(struct nouveau_gpuobj *gpuobj, u32 offset, u32 val)
{
	struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
	struct drm_device *dev = gpuobj->dev;

	if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) {
		u64 ptr = gpuobj->vinst + offset;
		u32 base = ptr >> 16;

		spin_lock(&dev_priv->ramin_lock);
		if (dev_priv->ramin_base != base) {
			dev_priv->ramin_base = base;
			nv_wr32(dev, 0x001700, dev_priv->ramin_base);
		}
		nv_wr32(dev, 0x700000 + (ptr & 0xffff), val);
		spin_unlock(&dev_priv->ramin_lock);
		return;
	}

	nv_wi32(dev, gpuobj->pinst + offset, val);
}