/*
 * Copyright (C) 2006 Ben Skeggs.
 *
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

/*
 * Authors:
 *   Ben Skeggs <darktama@iinet.net.au>
 */

#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nouveau_ramht.h"
/* NVidia uses context objects to drive drawing operations.

   Context objects can be selected into 8 subchannels in the FIFO,
   and then used via DMA command buffers.

   A context object is referenced by a user-defined handle (CARD32). The HW
   looks up graphics objects in a hash table in the instance RAM.

   An entry in the hash table consists of 2 CARD32s. The first CARD32
   contains the handle, the second a bitfield that contains the address of
   the object in instance RAM.

   The format of the second CARD32 seems to be:

   NV4 to NV30:

   15: 0 instance_addr >> 4
   17:16 engine (here uses 1 = graphics)
   28:24 channel id (here uses 0)
   31    valid (use 1)

   NV40:

   15: 0 instance_addr >> 4   (maybe 19-0)
   21:20 engine (here uses 1 = graphics)
   I'm unsure about the other bits, but using 0 seems to work.

   The key into the hash table depends on the object handle and channel id;
   it is computed by the folding hash in nouveau_ramht.c, sketched below.
*/
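
/*
 * Illustration only: a sketch of the handle/channel folding that picks a
 * hash table slot, in the spirit of the real implementation in
 * nouveau_ramht.c.  "bits" is log2 of the number of table entries (at
 * least 4 here); the exact channel-id mixing varies per generation, so
 * treat the constants below as assumptions rather than hardware fact.
 */
static inline u32
example_ramht_hash(u32 handle, int chid, int bits)
{
	u32 hash = 0;

	/* XOR successive "bits"-wide chunks of the handle together */
	while (handle) {
		hash ^= handle & ((1 << bits) - 1);
		handle >>= bits;
	}

	/* mix in the channel id, then scale to an 8-byte entry offset */
	hash ^= chid << (bits - 4);
	return hash << 3;
}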

int
nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
		   uint32_t size, int align, uint32_t flags,
		   struct nouveau_gpuobj **gpuobj_ret)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine = &dev_priv->engine;
	struct nouveau_gpuobj *gpuobj;
	struct drm_mm_node *ramin = NULL;
	int ret;

	NV_DEBUG(dev, "ch%d size=%u align=%d flags=0x%08x\n",
		 chan ? chan->id : -1, size, align, flags);

	if (!dev_priv || !gpuobj_ret || *gpuobj_ret != NULL)
		return -EINVAL;

	gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
	if (!gpuobj)
		return -ENOMEM;
	NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
	gpuobj->dev = dev;
	gpuobj->flags = flags;
	kref_init(&gpuobj->refcount);
	gpuobj->size = size;

	list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);

	if (chan) {
		NV_DEBUG(dev, "channel heap\n");

		ramin = drm_mm_search_free(&chan->ramin_heap, size, align, 0);
		if (ramin)
			ramin = drm_mm_get_block(ramin, size, align);

		if (!ramin) {
			nouveau_gpuobj_ref(NULL, &gpuobj);
			return -ENOMEM;
		}
	} else {
		NV_DEBUG(dev, "global heap\n");

		/* allocate backing pages, sets vinst */
		ret = engine->instmem.populate(dev, gpuobj, &size);
		if (ret) {
			nouveau_gpuobj_ref(NULL, &gpuobj);
			return ret;
		}

		/* try and get aperture space */
		ramin = drm_mm_search_free(&dev_priv->ramin_heap, size, align, 0);
		if (ramin)
			ramin = drm_mm_get_block(ramin, size, align);

		/* on nv50 it's ok to fail, we have a fallback path */
		if (!ramin && dev_priv->card_type < NV_50) {
			nouveau_gpuobj_ref(NULL, &gpuobj);
			return -ENOMEM;
		}
	}

	/* if we got a chunk of the aperture, map pages into it */
	gpuobj->im_pramin = ramin;
	if (!chan && gpuobj->im_pramin && dev_priv->ramin_available) {
		ret = engine->instmem.bind(dev, gpuobj);
		if (ret) {
			nouveau_gpuobj_ref(NULL, &gpuobj);
			return ret;
		}
	}

	/* calculate the various different addresses for the object */
	if (chan) {
		gpuobj->pinst = chan->ramin->pinst;
		if (gpuobj->pinst != ~0)
			gpuobj->pinst += gpuobj->im_pramin->start;

		if (dev_priv->card_type < NV_50) {
			gpuobj->cinst = gpuobj->pinst;
		} else {
			gpuobj->cinst = gpuobj->im_pramin->start;
			gpuobj->vinst = gpuobj->im_pramin->start +
					chan->ramin->vinst;
		}
	} else {
		if (gpuobj->im_pramin)
			gpuobj->pinst = gpuobj->im_pramin->start;
		else
			gpuobj->pinst = ~0;
		gpuobj->cinst = 0xdeadbeef;
	}

	if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
		int i;

		for (i = 0; i < gpuobj->size; i += 4)
			nv_wo32(gpuobj, i, 0);
		engine->instmem.flush(dev);
	}

	*gpuobj_ret = gpuobj;
	return 0;
}
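
/*
 * Illustration only: allocating a zero-filled, 4KiB-aligned object from
 * the global heap (chan == NULL), much as the channel setup code further
 * down does for the per-channel RAMIN block.
 */
static inline int
example_gpuobj_alloc(struct drm_device *dev, struct nouveau_gpuobj **obj)
{
	return nouveau_gpuobj_new(dev, NULL, 0x1000, 0x1000,
				  NVOBJ_FLAG_ZERO_ALLOC, obj);
}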

int
nouveau_gpuobj_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	NV_DEBUG(dev, "\n");

	INIT_LIST_HEAD(&dev_priv->gpuobj_list);
	spin_lock_init(&dev_priv->ramin_lock);
	dev_priv->ramin_base = ~0;

	return 0;
}

void
nouveau_gpuobj_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	NV_DEBUG(dev, "\n");
}

void
nouveau_gpuobj_late_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	BUG_ON(!list_empty(&dev_priv->gpuobj_list));
}

static void
nouveau_gpuobj_del(struct kref *ref)
{
	struct nouveau_gpuobj *gpuobj =
		container_of(ref, struct nouveau_gpuobj, refcount);
	struct drm_device *dev = gpuobj->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine = &dev_priv->engine;
	int i;

	NV_DEBUG(dev, "gpuobj %p\n", gpuobj);

	if (gpuobj->im_pramin && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) {
		for (i = 0; i < gpuobj->size; i += 4)
			nv_wo32(gpuobj, i, 0);
		engine->instmem.flush(dev);
	}

	if (gpuobj->dtor)
		gpuobj->dtor(dev, gpuobj);

	if (gpuobj->im_backing)
		engine->instmem.clear(dev, gpuobj);

	if (gpuobj->im_pramin)
		drm_mm_put_block(gpuobj->im_pramin);

	list_del(&gpuobj->list);

	kfree(gpuobj);
}

void
nouveau_gpuobj_ref(struct nouveau_gpuobj *ref, struct nouveau_gpuobj **ptr)
{
	if (ref)
		kref_get(&ref->refcount);

	if (*ptr)
		kref_put(&(*ptr)->refcount, nouveau_gpuobj_del);

	*ptr = ref;
}
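
/*
 * Illustration only: the reference helper above both takes and drops
 * references depending on its arguments -- pass a non-NULL first argument
 * to take a reference, pass NULL to drop the one held in *ptr.  "tmp" is
 * a hypothetical local used just for this sketch.
 */
static inline void
example_gpuobj_ref_usage(struct nouveau_gpuobj *obj)
{
	struct nouveau_gpuobj *tmp = NULL;

	nouveau_gpuobj_ref(obj, &tmp);	/* get: tmp now holds a reference */
	nouveau_gpuobj_ref(NULL, &tmp);	/* put: reference dropped, tmp NULL */
}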

int
nouveau_gpuobj_new_fake(struct drm_device *dev, u32 pinst, u64 vinst,
			u32 size, u32 flags, struct nouveau_gpuobj **pgpuobj)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = NULL;
	int i;

	NV_DEBUG(dev,
		 "pinst=0x%08x vinst=0x%010llx size=0x%08x flags=0x%08x\n",
		 pinst, vinst, size, flags);

	gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
	if (!gpuobj)
		return -ENOMEM;
	NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
	gpuobj->dev = dev;
	gpuobj->flags = flags;
	kref_init(&gpuobj->refcount);
	gpuobj->size  = size;
	gpuobj->pinst = pinst;
	gpuobj->cinst = 0xdeadbeef;
	gpuobj->vinst = vinst;

	if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
		for (i = 0; i < gpuobj->size; i += 4)
			nv_wo32(gpuobj, i, 0);
		dev_priv->engine.instmem.flush(dev);
	}

	list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
	*pgpuobj = gpuobj;
	return 0;
}

static uint32_t
nouveau_gpuobj_class_instmem_size(struct drm_device *dev, int class)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	/*XXX: dodgy hack for now */
	if (dev_priv->card_type >= NV_50)
		return 24;
	if (dev_priv->card_type >= NV_40)
		return 32;
	return 16;
}

/*
   DMA objects are used to reference a piece of memory in the
   framebuffer, PCI or AGP address space. Each object is 16 bytes big
   and looks as follows:

   entry[0]
	11:0  class (seems like I can always use 0 here)
	12    page table present?
	13    page entry linear?
	15:14 access: 0 rw, 1 ro, 2 wo
	17:16 target: 0 NV memory, 1 NV memory tiled, 2 PCI, 3 AGP
	31:20 dma adjust (bits 0-11 of the address)
   entry[1]
	dma limit (size of transfer)
   entry[X]
	1     0 readonly, 1 readwrite
	31:12 dma frame address of the page (bits 12-31 of the address)
   entry[N]
	page table terminator; nvidia uses the same value as the first pte,
	while rivatv uses 0xffffffff

   Non-linear page tables need a list of frame addresses afterwards;
   the rivatv project has some info on this.

   The method below creates a DMA object in instance RAM and returns a handle
   to it that can be used to set up context objects.
*/
int
nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class,
		       uint64_t offset, uint64_t size, int access,
		       int target, struct nouveau_gpuobj **gpuobj)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
	int ret;

	NV_DEBUG(dev, "ch%d class=0x%04x offset=0x%llx size=0x%llx\n",
		 chan->id, class, offset, size);
	NV_DEBUG(dev, "access=%d target=%d\n", access, target);

	switch (target) {
	case NV_DMA_TARGET_AGP:
		offset += dev_priv->gart_info.aper_base;
		break;
	default:
		break;
	}

	ret = nouveau_gpuobj_new(dev, chan,
				 nouveau_gpuobj_class_instmem_size(dev, class),
				 16, NVOBJ_FLAG_ZERO_ALLOC |
				 NVOBJ_FLAG_ZERO_FREE, gpuobj);
	if (ret) {
		NV_ERROR(dev, "Error creating gpuobj: %d\n", ret);
		return ret;
	}

	if (dev_priv->card_type < NV_50) {
		uint32_t frame, adjust, pte_flags = 0;

		if (access != NV_DMA_ACCESS_RO)
			pte_flags |= (1<<1);
		adjust = offset &  0x00000fff;
		frame  = offset & ~0x00000fff;

		nv_wo32(*gpuobj,  0, ((1<<12) | (1<<13) | (adjust << 20) |
				      (access << 14) | (target << 16) |
				      class));
		nv_wo32(*gpuobj,  4, size - 1);
		nv_wo32(*gpuobj,  8, frame | pte_flags);
		nv_wo32(*gpuobj, 12, frame | pte_flags);
	} else {
		uint64_t limit = offset + size - 1;
		uint32_t flags0, flags5;

		if (target == NV_DMA_TARGET_VIDMEM) {
			flags0 = 0x00190000;
			flags5 = 0x00010000;
		} else {
			flags0 = 0x7fc00000;
			flags5 = 0x00080000;
		}

		nv_wo32(*gpuobj,  0, flags0 | class);
		nv_wo32(*gpuobj,  4, lower_32_bits(limit));
		nv_wo32(*gpuobj,  8, lower_32_bits(offset));
		nv_wo32(*gpuobj, 12, ((upper_32_bits(limit) & 0xff) << 24) |
				      (upper_32_bits(offset) & 0xff));
		nv_wo32(*gpuobj, 20, flags5);
	}

	instmem->flush(dev);

	(*gpuobj)->engine = NVOBJ_ENGINE_SW;
	(*gpuobj)->class  = class;
	return 0;
}
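
/*
 * Illustration only: building a read/write ctxdma spanning all of VRAM,
 * mirroring what nouveau_gpuobj_channel_init() does below for pre-NV50
 * cards.  "chan" is assumed to be a fully initialised channel.
 */
static inline int
example_vram_ctxdma(struct nouveau_channel *chan,
		    struct nouveau_gpuobj **ctxdma)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;

	return nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
				      dev_priv->fb_available_size,
				      NV_DMA_ACCESS_RW, NV_DMA_TARGET_VIDMEM,
				      ctxdma);
}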

int
nouveau_gpuobj_gart_dma_new(struct nouveau_channel *chan,
			    uint64_t offset, uint64_t size, int access,
			    struct nouveau_gpuobj **gpuobj,
			    uint32_t *o_ret)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int ret;

	if (dev_priv->gart_info.type == NOUVEAU_GART_AGP ||
	    (dev_priv->card_type >= NV_50 &&
	     dev_priv->gart_info.type == NOUVEAU_GART_SGDMA)) {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     offset + dev_priv->vm_gart_base,
					     size, access, NV_DMA_TARGET_AGP,
					     gpuobj);
		if (o_ret)
			*o_ret = 0;
	} else
	if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA) {
		nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma, gpuobj);
		if (offset & ~0xffffffffULL) {
			NV_ERROR(dev, "obj offset exceeds 32-bits\n");
			return -EINVAL;
		}
		if (o_ret)
			*o_ret = (uint32_t)offset;
		ret = (*gpuobj != NULL) ? 0 : -EINVAL;
	} else {
		NV_ERROR(dev, "Invalid GART type %d\n", dev_priv->gart_info.type);
		return -EINVAL;
	}

	return ret;
}

/* Context objects in the instance RAM have the following structure.
 * On NV40 they are 32 bytes long, on NV30 and smaller 16 bytes.

   NV4 - NV30:

   entry[0]
	11:0  class
	12    chroma key enable
	13    user clip enable
	14    swizzle enable
	17:15 patch config:
	      scrcopy_and, rop_and, blend_and, scrcopy, srccopy_pre, blend_pre
	18    synchronize enable
	19    endian: 1 big, 0 little
	21:20 dither mode
	23    single step enable
	24    patch status: 0 invalid, 1 valid
	25    context_surface 0: 1 valid
	26    context surface 1: 1 valid
	27    context pattern: 1 valid
	28    context rop: 1 valid
	29,30 context beta, beta4
   entry[1]
	7:0   mono format
	15:8  color format
	31:16 notify instance address
   entry[2]
	15:0  dma 0 instance address
	31:16 dma 1 instance address
   entry[3]
	dma method traps

   NV40:
   No idea what the exact format is. Here's what can be deduced:

   entry[0]:
	11:0  class (maybe uses more bits here?)
	17    user clip enable
	21:19 patch config
	25    patch status valid ?
   entry[1]:
	15:0  DMA notifier (maybe 20:0)
   entry[2]:
	15:0  DMA 0 instance (maybe 20:0)
	24    big endian
   entry[3]:
	15:0  DMA 1 instance (maybe 20:0)
   entry[4]:
   entry[5]:
	set to 0?
*/
int
nouveau_gpuobj_gr_new(struct nouveau_channel *chan, int class,
		      struct nouveau_gpuobj **gpuobj)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int ret;

	NV_DEBUG(dev, "ch%d class=0x%04x\n", chan->id, class);

	ret = nouveau_gpuobj_new(dev, chan,
				 nouveau_gpuobj_class_instmem_size(dev, class),
				 16,
				 NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE,
				 gpuobj);
	if (ret) {
		NV_ERROR(dev, "Error creating gpuobj: %d\n", ret);
		return ret;
	}

	if (dev_priv->card_type >= NV_50) {
		nv_wo32(*gpuobj,  0, class);
		nv_wo32(*gpuobj, 20, 0x00010000);
	} else {
		switch (class) {
		case NV_CLASS_NULL:
			nv_wo32(*gpuobj, 0, 0x00001030);
			nv_wo32(*gpuobj, 4, 0xFFFFFFFF);
			break;
		default:
			if (dev_priv->card_type >= NV_40) {
				nv_wo32(*gpuobj, 0, class);
#ifdef __BIG_ENDIAN
				nv_wo32(*gpuobj, 8, 0x01000000);
#endif
			} else {
#ifdef __BIG_ENDIAN
				nv_wo32(*gpuobj, 0, class | 0x00080000);
#else
				nv_wo32(*gpuobj, 0, class);
#endif
			}
		}
	}
	dev_priv->engine.instmem.flush(dev);

	(*gpuobj)->engine = NVOBJ_ENGINE_GR;
	(*gpuobj)->class  = class;
	return 0;
}
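
/*
 * Illustration only: instantiating a graphics object and publishing it in
 * the channel's hash table.  Class 0x502d (NV50 2D) and the handle value
 * are assumptions made for the sake of the example; RAMHT takes its own
 * reference on insert, so the local one is dropped afterwards.
 */
static inline int
example_gr_object(struct nouveau_channel *chan)
{
	struct nouveau_gpuobj *gr = NULL;
	int ret;

	ret = nouveau_gpuobj_gr_new(chan, 0x502d, &gr);
	if (ret)
		return ret;

	ret = nouveau_ramht_insert(chan, 0xbeef502d, gr);
	nouveau_gpuobj_ref(NULL, &gr);
	return ret;
}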

int
nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class,
		      struct nouveau_gpuobj **gpuobj_ret)
{
	struct drm_nouveau_private *dev_priv;
	struct nouveau_gpuobj *gpuobj;

	if (!chan || !gpuobj_ret || *gpuobj_ret != NULL)
		return -EINVAL;
	dev_priv = chan->dev->dev_private;

	gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
	if (!gpuobj)
		return -ENOMEM;
	gpuobj->dev = chan->dev;
	gpuobj->engine = NVOBJ_ENGINE_SW;
	gpuobj->class = class;
	kref_init(&gpuobj->refcount);
	gpuobj->cinst = 0x40;

	list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
	*gpuobj_ret = gpuobj;
	return 0;
}

static int
nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t size;
	uint32_t base;
	int ret;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	/* Base amount for object storage (4KiB enough?) */
	size = 0x1000;
	base = 0;

	/* PGRAPH context */
	size += dev_priv->engine.graph.grctx_size;

	if (dev_priv->card_type == NV_50) {
		/* Various fixed table thingos */
		size += 0x1400; /* mostly unknown stuff */
		size += 0x4000; /* vm pd */
		base  = 0x6000;
		/* RAMHT, not sure about setting size yet, 32KiB to be safe */
		size += 0x8000;
		/* RAMFC */
		size += 0x1000;
	}

	ret = nouveau_gpuobj_new(dev, NULL, size, 0x1000, 0, &chan->ramin);
	if (ret) {
		NV_ERROR(dev, "Error allocating channel PRAMIN: %d\n", ret);
		return ret;
	}

	ret = drm_mm_init(&chan->ramin_heap, base, size);
	if (ret) {
		NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret);
		nouveau_gpuobj_ref(NULL, &chan->ramin);
		return ret;
	}

	return 0;
}

int
nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
			    uint32_t vram_h, uint32_t tt_h)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
	struct nouveau_gpuobj *vram = NULL, *tt = NULL;
	int ret, i;

	NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);

	/* Allocate a chunk of memory for per-channel object storage */
	ret = nouveau_gpuobj_channel_init_pramin(chan);
	if (ret) {
		NV_ERROR(dev, "init pramin\n");
		return ret;
	}

	/* NV50 VM
	 *  - Allocate per-channel page-directory
	 *  - Map GART and VRAM into the channel's address space at the
	 *    locations determined during init.
	 */
	if (dev_priv->card_type >= NV_50) {
		u32 pgd_offs = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200;
		u64 vm_vinst = chan->ramin->vinst + pgd_offs;
		u32 vm_pinst = chan->ramin->pinst;
		u32 pde;

		if (vm_pinst != ~0)
			vm_pinst += pgd_offs;

		ret = nouveau_gpuobj_new_fake(dev, vm_pinst, vm_vinst, 0x4000,
					      0, &chan->vm_pd);
		if (ret)
			return ret;
		for (i = 0; i < 0x4000; i += 8) {
			nv_wo32(chan->vm_pd, i + 0, 0x00000000);
			nv_wo32(chan->vm_pd, i + 4, 0xdeadcafe);
		}

		/* each 8-byte PDE covers 512MiB of virtual address space */
		nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma,
				   &chan->vm_gart_pt);
		pde = (dev_priv->vm_gart_base / (512*1024*1024)) * 8;
		nv_wo32(chan->vm_pd, pde + 0, chan->vm_gart_pt->vinst | 3);
		nv_wo32(chan->vm_pd, pde + 4, 0x00000000);

		pde = (dev_priv->vm_vram_base / (512*1024*1024)) * 8;
		for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) {
			nouveau_gpuobj_ref(dev_priv->vm_vram_pt[i],
					   &chan->vm_vram_pt[i]);

			nv_wo32(chan->vm_pd, pde + 0,
				chan->vm_vram_pt[i]->vinst | 0x61);
			nv_wo32(chan->vm_pd, pde + 4, 0x00000000);
			pde += 8;
		}

		instmem->flush(dev);
	}

	/* RAMHT */
	if (dev_priv->card_type < NV_50) {
		nouveau_ramht_ref(dev_priv->ramht, &chan->ramht, NULL);
	} else {
		struct nouveau_gpuobj *ramht = NULL;

		ret = nouveau_gpuobj_new(dev, chan, 0x8000, 16,
					 NVOBJ_FLAG_ZERO_ALLOC, &ramht);
		if (ret)
			return ret;

		ret = nouveau_ramht_new(dev, ramht, &chan->ramht);
		nouveau_gpuobj_ref(NULL, &ramht);
		if (ret)
			return ret;
	}

	/* VRAM ctxdma */
	if (dev_priv->card_type >= NV_50) {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     0, dev_priv->vm_end,
					     NV_DMA_ACCESS_RW,
					     NV_DMA_TARGET_AGP, &vram);
		if (ret) {
			NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
			return ret;
		}
	} else {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     0, dev_priv->fb_available_size,
					     NV_DMA_ACCESS_RW,
					     NV_DMA_TARGET_VIDMEM, &vram);
		if (ret) {
			NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
			return ret;
		}
	}

	ret = nouveau_ramht_insert(chan, vram_h, vram);
	nouveau_gpuobj_ref(NULL, &vram);
	if (ret) {
		NV_ERROR(dev, "Error adding VRAM ctxdma to RAMHT: %d\n", ret);
		return ret;
	}

	/* TT memory ctxdma */
	if (dev_priv->card_type >= NV_50) {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     0, dev_priv->vm_end,
					     NV_DMA_ACCESS_RW,
					     NV_DMA_TARGET_AGP, &tt);
		if (ret) {
			NV_ERROR(dev, "Error creating TT ctxdma: %d\n", ret);
			return ret;
		}
	} else
	if (dev_priv->gart_info.type != NOUVEAU_GART_NONE) {
		ret = nouveau_gpuobj_gart_dma_new(chan, 0,
						  dev_priv->gart_info.aper_size,
						  NV_DMA_ACCESS_RW, &tt, NULL);
	} else {
		NV_ERROR(dev, "Invalid GART type %d\n", dev_priv->gart_info.type);
		ret = -EINVAL;
	}

	if (ret) {
		NV_ERROR(dev, "Error creating TT ctxdma: %d\n", ret);
		return ret;
	}

	ret = nouveau_ramht_insert(chan, tt_h, tt);
	nouveau_gpuobj_ref(NULL, &tt);
	if (ret) {
		NV_ERROR(dev, "Error adding TT ctxdma to RAMHT: %d\n", ret);
		return ret;
	}

	return 0;
}

void
nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct drm_device *dev = chan->dev;
	int i;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	if (!chan->ramht)
		return;

	nouveau_ramht_ref(NULL, &chan->ramht, chan);

	nouveau_gpuobj_ref(NULL, &chan->vm_pd);
	nouveau_gpuobj_ref(NULL, &chan->vm_gart_pt);
	for (i = 0; i < dev_priv->vm_vram_pt_nr; i++)
		nouveau_gpuobj_ref(NULL, &chan->vm_vram_pt[i]);

	if (chan->ramin_heap.free_stack.next)
		drm_mm_takedown(&chan->ramin_heap);
	nouveau_gpuobj_ref(NULL, &chan->ramin);
}

int
nouveau_gpuobj_suspend(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj;
	int i;

	if (dev_priv->card_type < NV_50) {
		dev_priv->susres.ramin_copy = vmalloc(dev_priv->ramin_rsvd_vram);
		if (!dev_priv->susres.ramin_copy)
			return -ENOMEM;

		for (i = 0; i < dev_priv->ramin_rsvd_vram; i += 4)
			dev_priv->susres.ramin_copy[i/4] = nv_ri32(dev, i);
		return 0;
	}

	list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
		if (!gpuobj->im_backing)
			continue;

		gpuobj->im_backing_suspend = vmalloc(gpuobj->size);
		if (!gpuobj->im_backing_suspend) {
			nouveau_gpuobj_resume(dev);
			return -ENOMEM;
		}

		for (i = 0; i < gpuobj->size; i += 4)
			gpuobj->im_backing_suspend[i/4] = nv_ro32(gpuobj, i);
	}

	return 0;
}

void
nouveau_gpuobj_suspend_cleanup(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj;

	if (dev_priv->card_type < NV_50) {
		vfree(dev_priv->susres.ramin_copy);
		dev_priv->susres.ramin_copy = NULL;
		return;
	}

	list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
		if (!gpuobj->im_backing_suspend)
			continue;

		vfree(gpuobj->im_backing_suspend);
		gpuobj->im_backing_suspend = NULL;
	}
}

void
nouveau_gpuobj_resume(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj;
	int i;

	if (dev_priv->card_type < NV_50) {
		for (i = 0; i < dev_priv->ramin_rsvd_vram; i += 4)
			nv_wi32(dev, i, dev_priv->susres.ramin_copy[i/4]);
		nouveau_gpuobj_suspend_cleanup(dev);
		return;
	}

	list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
		if (!gpuobj->im_backing_suspend)
			continue;

		for (i = 0; i < gpuobj->size; i += 4)
			nv_wo32(gpuobj, i, gpuobj->im_backing_suspend[i/4]);
		dev_priv->engine.instmem.flush(dev);
	}

	nouveau_gpuobj_suspend_cleanup(dev);
}

int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_nouveau_grobj_alloc *init = data;
	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
	struct nouveau_pgraph_object_class *grc;
	struct nouveau_gpuobj *gr = NULL;
	struct nouveau_channel *chan;
	int ret;

	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(init->channel, file_priv, chan);

	if (init->handle == ~0)
		return -EINVAL;

	grc = pgraph->grclass;
	while (grc->id) {
		if (grc->id == init->class)
			break;
		grc++;
	}

	if (!grc->id) {
		NV_ERROR(dev, "Illegal object class: 0x%x\n", init->class);
		return -EPERM;
	}

	if (nouveau_ramht_find(chan, init->handle))
		return -EEXIST;

	if (!grc->software)
		ret = nouveau_gpuobj_gr_new(chan, grc->id, &gr);
	else
		ret = nouveau_gpuobj_sw_new(chan, grc->id, &gr);
	if (ret) {
		NV_ERROR(dev, "Error creating object: %d (%d/0x%08x)\n",
			 ret, init->channel, init->handle);
		return ret;
	}

	ret = nouveau_ramht_insert(chan, init->handle, gr);
	nouveau_gpuobj_ref(NULL, &gr);
	if (ret) {
		NV_ERROR(dev, "Error referencing object: %d (%d/0x%08x)\n",
			 ret, init->channel, init->handle);
		return ret;
	}

	return 0;
}

int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_nouveau_gpuobj_free *objfree = data;
	struct nouveau_gpuobj *gpuobj;
	struct nouveau_channel *chan;

	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(objfree->channel, file_priv, chan);

	gpuobj = nouveau_ramht_find(chan, objfree->handle);
	if (!gpuobj)
		return -ENOENT;

	nouveau_ramht_remove(chan, objfree->handle);
	return 0;
}

/*
 * PRAMIN accessors.  Objects with no mapping in the BAR0 aperture
 * (pinst == ~0, or instmem not yet available) are reached through the
 * 64KiB window at 0x700000, sliding it with the base register 0x001700.
 */
u32
nv_ro32(struct nouveau_gpuobj *gpuobj, u32 offset)
{
	struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
	struct drm_device *dev = gpuobj->dev;

	if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) {
		u64 ptr = gpuobj->vinst + offset;
		u32 base = ptr >> 16;
		u32 val;

		spin_lock(&dev_priv->ramin_lock);
		if (dev_priv->ramin_base != base) {
			dev_priv->ramin_base = base;
			nv_wr32(dev, 0x001700, dev_priv->ramin_base);
		}
		val = nv_rd32(dev, 0x700000 + (ptr & 0xffff));
		spin_unlock(&dev_priv->ramin_lock);
		return val;
	}

	return nv_ri32(dev, gpuobj->pinst + offset);
}

void
nv_wo32(struct nouveau_gpuobj *gpuobj, u32 offset, u32 val)
{
	struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
	struct drm_device *dev = gpuobj->dev;

	if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) {
		u64 ptr = gpuobj->vinst + offset;
		u32 base = ptr >> 16;

		spin_lock(&dev_priv->ramin_lock);
		if (dev_priv->ramin_base != base) {
			dev_priv->ramin_base = base;
			nv_wr32(dev, 0x001700, dev_priv->ramin_base);
		}
		nv_wr32(dev, 0x700000 + (ptr & 0xffff), val);
		spin_unlock(&dev_priv->ramin_lock);
		return;
	}

	nv_wi32(dev, gpuobj->pinst + offset, val);
}