/*
 * Copyright (C) 2006 Ben Skeggs.
 *
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

/*
 * Authors:
 *    Ben Skeggs <darktama@iinet.net.au>
 */

#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nouveau_ramht.h"
/* NVidia uses context objects to drive drawing operations.

   Context objects can be selected into 8 subchannels in the FIFO,
   and then used via DMA command buffers.

   A context object is referenced by a user-defined handle (CARD32). The HW
   looks up graphics objects in a hash table in the instance RAM.

   An entry in the hash table consists of 2 CARD32s. The first CARD32
   contains the handle, the second is a bitfield containing the address of
   the object in instance RAM.

   The format of the second CARD32 seems to be:

   NV4 to NV30:

   15: 0 instance_addr >> 4
   17:16 engine (here uses 1 = graphics)
   28:24 channel id (here uses 0)
   31    valid (use 1)

   NV40:

   15: 0 instance_addr >> 4 (maybe 19-0)
   21:20 engine (here uses 1 = graphics)
   I'm unsure about the other bits, but using 0 seems to work.

   The key into the hash table depends on the object handle and channel id;
   it is no longer computed in this file, see nouveau_ramht_hash_handle()
   in nouveau_ramht.c (an XOR folding of the handle and, on pre-NV50
   chipsets, the channel id).
*/

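/* For reference, a sketch of that hash, following the implementation in
 * nouveau_ramht.c (nouveau_ramht_hash_handle()); the field widths below
 * are assumptions taken from that code, not from hardware documentation:
 *
 *	hash = 0;
 *	while (handle) {
 *		hash ^= handle & ((1 << ramht_bits) - 1);
 *		handle >>= ramht_bits;
 *	}
 *	if (card_type < NV_50)
 *		hash ^= chan_id << (ramht_bits - 4);
 *	hash <<= 3;	<- each hash table entry is 2 CARD32s (8 bytes)
 */
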
int
nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
		   uint32_t size, int align, uint32_t flags,
		   struct nouveau_gpuobj **gpuobj_ret)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine = &dev_priv->engine;
	struct nouveau_gpuobj *gpuobj;
	struct drm_mm *pramin = NULL;
	int ret;

	NV_DEBUG(dev, "ch%d size=%u align=%d flags=0x%08x\n",
		 chan ? chan->id : -1, size, align, flags);

	if (!dev_priv || !gpuobj_ret || *gpuobj_ret != NULL)
		return -EINVAL;

	gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
	if (!gpuobj)
		return -ENOMEM;
	NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
	gpuobj->dev = dev;
	gpuobj->flags = flags;
	gpuobj->refcount = 1;

	list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);

	/* Choose between the global instmem heap and the per-channel
	 * private instmem heap.  On <NV50, allow requests for private
	 * instmem to be satisfied from the global heap if no per-channel
	 * area is available.
	 */
	if (chan) {
		NV_DEBUG(dev, "channel heap\n");
		pramin = &chan->ramin_heap;
	} else {
		NV_DEBUG(dev, "global heap\n");
		pramin = &dev_priv->ramin_heap;

		ret = engine->instmem.populate(dev, gpuobj, &size);
		if (ret) {
			nouveau_gpuobj_ref(NULL, &gpuobj);
			return ret;
		}
	}

	/* Allocate a chunk of the PRAMIN aperture */
	gpuobj->im_pramin = drm_mm_search_free(pramin, size, align, 0);
	if (gpuobj->im_pramin)
		gpuobj->im_pramin = drm_mm_get_block(gpuobj->im_pramin, size, align);

	if (!gpuobj->im_pramin) {
		nouveau_gpuobj_ref(NULL, &gpuobj);
		return -ENOMEM;
	}

	if (!chan) {
		ret = engine->instmem.bind(dev, gpuobj);
		if (ret) {
			nouveau_gpuobj_ref(NULL, &gpuobj);
			return ret;
		}
	}

	/* calculate the various different addresses for the object */
	if (chan) {
		gpuobj->pinst = gpuobj->im_pramin->start +
				chan->ramin->im_pramin->start;
		if (dev_priv->card_type < NV_50) {
			gpuobj->cinst = gpuobj->pinst;
		} else {
			gpuobj->cinst = gpuobj->im_pramin->start;
			gpuobj->vinst = gpuobj->im_pramin->start +
					chan->ramin->im_backing_start;
		}
	} else {
		gpuobj->pinst = gpuobj->im_pramin->start;
		gpuobj->cinst = 0xdeadbeef;
		gpuobj->vinst = gpuobj->im_backing_start;
	}

	if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
		int i;

		for (i = 0; i < gpuobj->im_pramin->size; i += 4)
			nv_wo32(gpuobj, i, 0);
		engine->instmem.flush(dev);
	}

	*gpuobj_ret = gpuobj;
	return 0;
}
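
/* A minimal usage sketch for the allocator above (the size, alignment and
 * flags here are illustrative, not required values):
 *
 *	struct nouveau_gpuobj *obj = NULL;
 *	int ret;
 *
 *	ret = nouveau_gpuobj_new(dev, chan, 0x1000, 16,
 *				 NVOBJ_FLAG_ZERO_ALLOC, &obj);
 *	if (ret)
 *		return ret;
 *	...
 *	nouveau_gpuobj_ref(NULL, &obj);	<- drop the allocation again
 */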

int
nouveau_gpuobj_early_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	NV_DEBUG(dev, "\n");

	INIT_LIST_HEAD(&dev_priv->gpuobj_list);

	return 0;
}

int
nouveau_gpuobj_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *ramht = NULL;
	int ret;

	NV_DEBUG(dev, "\n");

	if (dev_priv->card_type >= NV_50)
		return 0;

	ret = nouveau_gpuobj_new_fake(dev, dev_priv->ramht_offset, ~0,
				      dev_priv->ramht_size,
				      NVOBJ_FLAG_ZERO_ALLOC, &ramht);
	if (ret)
		return ret;

	ret = nouveau_ramht_new(dev, ramht, &dev_priv->ramht);
	nouveau_gpuobj_ref(NULL, &ramht);
	return ret;
}

void
nouveau_gpuobj_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	NV_DEBUG(dev, "\n");

	nouveau_ramht_ref(NULL, &dev_priv->ramht, NULL);
}

void
nouveau_gpuobj_late_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = NULL;
	struct list_head *entry, *tmp;

	NV_DEBUG(dev, "\n");

	list_for_each_safe(entry, tmp, &dev_priv->gpuobj_list) {
		gpuobj = list_entry(entry, struct nouveau_gpuobj, list);

		NV_ERROR(dev, "gpuobj %p still exists at takedown, refs=%d\n",
			 gpuobj, gpuobj->refcount);

		gpuobj->refcount = 1;
		nouveau_gpuobj_ref(NULL, &gpuobj);
	}
}

static int
nouveau_gpuobj_del(struct nouveau_gpuobj *gpuobj)
{
	struct drm_device *dev = gpuobj->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine = &dev_priv->engine;
	int i;

	NV_DEBUG(dev, "gpuobj %p\n", gpuobj);

	if (gpuobj->im_pramin && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) {
		for (i = 0; i < gpuobj->im_pramin->size; i += 4)
			nv_wo32(gpuobj, i, 0);
		engine->instmem.flush(dev);
	}

	if (gpuobj->dtor)
		gpuobj->dtor(dev, gpuobj);

	if (gpuobj->im_backing && !(gpuobj->flags & NVOBJ_FLAG_FAKE))
		engine->instmem.clear(dev, gpuobj);

	if (gpuobj->im_pramin) {
		if (gpuobj->flags & NVOBJ_FLAG_FAKE)
			kfree(gpuobj->im_pramin);
		else
			drm_mm_put_block(gpuobj->im_pramin);
	}

	list_del(&gpuobj->list);

	kfree(gpuobj);
	return 0;
}

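/* Take and/or drop references through a single entry point; a short usage
 * sketch (the function names are the real API, the surrounding lines are
 * illustrative):
 *
 *	nouveau_gpuobj_ref(obj, &slot);		<- slot takes a reference on
 *						   obj, dropping any reference
 *						   it held before
 *	nouveau_gpuobj_ref(NULL, &slot);	<- slot drops its reference;
 *						   the object is deleted when
 *						   the count reaches zero
 */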
void
nouveau_gpuobj_ref(struct nouveau_gpuobj *ref, struct nouveau_gpuobj **ptr)
{
	if (ref)
		ref->refcount++;

	if (*ptr && --(*ptr)->refcount == 0)
		nouveau_gpuobj_del(*ptr);

	*ptr = ref;
}

int
nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t p_offset,
			uint32_t b_offset, uint32_t size,
			uint32_t flags, struct nouveau_gpuobj **pgpuobj)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = NULL;
	int i;

	NV_DEBUG(dev,
		 "p_offset=0x%08x b_offset=0x%08x size=0x%08x flags=0x%08x\n",
		 p_offset, b_offset, size, flags);

	gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
	if (!gpuobj)
		return -ENOMEM;
	NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
	gpuobj->dev = dev;
	gpuobj->flags = flags | NVOBJ_FLAG_FAKE;
	gpuobj->refcount = 1;

	list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);

	if (p_offset != ~0) {
		gpuobj->im_pramin = kzalloc(sizeof(struct drm_mm_node),
					    GFP_KERNEL);
		if (!gpuobj->im_pramin) {
			nouveau_gpuobj_ref(NULL, &gpuobj);
			return -ENOMEM;
		}
		gpuobj->im_pramin->start = p_offset;
		gpuobj->im_pramin->size  = size;
	}

	if (b_offset != ~0) {
		gpuobj->im_backing = (struct nouveau_bo *)-1;
		gpuobj->im_backing_start = b_offset;
	}

	/* all current callers pass a real p_offset, so im_pramin is valid */
	gpuobj->pinst = gpuobj->im_pramin->start;
	gpuobj->cinst = 0xdeadbeef;
	gpuobj->vinst = gpuobj->im_backing_start;

	if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
		for (i = 0; i < gpuobj->im_pramin->size; i += 4)
			nv_wo32(gpuobj, i, 0);
		dev_priv->engine.instmem.flush(dev);
	}

	if (pgpuobj)
		*pgpuobj = gpuobj;
	return 0;
}

static uint32_t
nouveau_gpuobj_class_instmem_size(struct drm_device *dev, int class)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	/*XXX: dodgy hack for now */
	if (dev_priv->card_type >= NV_50)
		return 24;
	if (dev_priv->card_type >= NV_40)
		return 32;
	return 16;
}

/*
   DMA objects are used to reference a piece of memory in the
   framebuffer, PCI or AGP address space. Each object is 16 bytes big
   and looks as follows:

   entry[0]
   11:0  class (seems like I can always use 0 here)
   12    page table present?
   13    page entry linear?
   15:14 access: 0 rw, 1 ro, 2 wo
   17:16 target: 0 NV memory, 1 NV memory tiled, 2 PCI, 3 AGP
   31:20 dma adjust (bits 0-11 of the address)
   entry[1]
   dma limit (size of transfer)
   entry[X]
   1     0 readonly, 1 readwrite
   31:12 dma frame address of the page (bits 12-31 of the address)
   entry[N]
   page table terminator; nvidia uses the same value as the first pte,
   rivatv uses 0xffffffff

   Non-linear page tables need a list of frame addresses afterwards;
   the rivatv project has some info on this.

   The method below creates a DMA object in instance RAM and returns a handle
   to it that can be used to set up context objects.
*/
int
nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class,
		       uint64_t offset, uint64_t size, int access,
		       int target, struct nouveau_gpuobj **gpuobj)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
	int ret;

	NV_DEBUG(dev, "ch%d class=0x%04x offset=0x%llx size=0x%llx\n",
		 chan->id, class, offset, size);
	NV_DEBUG(dev, "access=%d target=%d\n", access, target);

	switch (target) {
	case NV_DMA_TARGET_AGP:
		offset += dev_priv->gart_info.aper_base;
		break;
	default:
		break;
	}

	ret = nouveau_gpuobj_new(dev, chan,
				 nouveau_gpuobj_class_instmem_size(dev, class),
				 16, NVOBJ_FLAG_ZERO_ALLOC |
				 NVOBJ_FLAG_ZERO_FREE, gpuobj);
	if (ret) {
		NV_ERROR(dev, "Error creating gpuobj: %d\n", ret);
		return ret;
	}

	if (dev_priv->card_type < NV_50) {
		uint32_t frame, adjust, pte_flags = 0;

		if (access != NV_DMA_ACCESS_RO)
			pte_flags |= (1<<1);
		adjust = offset &  0x00000fff;
		frame  = offset & ~0x00000fff;

		nv_wo32(*gpuobj,  0, ((1<<12) | (1<<13) | (adjust << 20) |
				      (access << 14) | (target << 16) |
				      class));
		nv_wo32(*gpuobj,  4, size - 1);
		nv_wo32(*gpuobj,  8, frame | pte_flags);
		nv_wo32(*gpuobj, 12, frame | pte_flags);
	} else {
		uint64_t limit = offset + size - 1;
		uint32_t flags0, flags5;

		if (target == NV_DMA_TARGET_VIDMEM) {
			flags0 = 0x00190000;
			flags5 = 0x00010000;
		} else {
			flags0 = 0x7fc00000;
			flags5 = 0x00080000;
		}

		nv_wo32(*gpuobj,  0, flags0 | class);
		nv_wo32(*gpuobj,  4, lower_32_bits(limit));
		nv_wo32(*gpuobj,  8, lower_32_bits(offset));
		nv_wo32(*gpuobj, 12, ((upper_32_bits(limit) & 0xff) << 24) |
				      (upper_32_bits(offset) & 0xff));
		nv_wo32(*gpuobj, 20, flags5);
	}

	instmem->flush(dev);

	(*gpuobj)->engine = NVOBJ_ENGINE_SW;
	(*gpuobj)->class  = class;
	return 0;
}
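
/* A usage sketch (the handle and size below are illustrative): create a
 * read/write ctxdma covering the first 16MiB of VRAM and expose it to a
 * channel under a user handle:
 *
 *	struct nouveau_gpuobj *vram = NULL;
 *	int ret;
 *
 *	ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
 *				     16 * 1024 * 1024, NV_DMA_ACCESS_RW,
 *				     NV_DMA_TARGET_VIDMEM, &vram);
 *	if (ret == 0) {
 *		ret = nouveau_ramht_insert(chan, 0xbeef0201, vram);
 *		nouveau_gpuobj_ref(NULL, &vram);
 *	}
 */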

int
nouveau_gpuobj_gart_dma_new(struct nouveau_channel *chan,
			    uint64_t offset, uint64_t size, int access,
			    struct nouveau_gpuobj **gpuobj,
			    uint32_t *o_ret)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int ret;

	if (dev_priv->gart_info.type == NOUVEAU_GART_AGP ||
	    (dev_priv->card_type >= NV_50 &&
	     dev_priv->gart_info.type == NOUVEAU_GART_SGDMA)) {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     offset + dev_priv->vm_gart_base,
					     size, access, NV_DMA_TARGET_AGP,
					     gpuobj);
		if (o_ret)
			*o_ret = 0;
	} else
	if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA) {
		nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma, gpuobj);
		if (offset & ~0xffffffffULL) {
			NV_ERROR(dev, "obj offset exceeds 32-bits\n");
			return -EINVAL;
		}
		if (o_ret)
			*o_ret = (uint32_t)offset;
		ret = (*gpuobj != NULL) ? 0 : -EINVAL;
	} else {
		NV_ERROR(dev, "Invalid GART type %d\n", dev_priv->gart_info.type);
		return -EINVAL;
	}

	return ret;
}

/* Context objects in the instance RAM have the following structure.
 * On NV40 they are 32 bytes long, on NV30 and smaller 16 bytes.

   NV4 - NV30:

   entry[0]
   11:0 class
   12   chroma key enable
   13   user clip enable
   14   swizzle enable
   17:15 patch config:
       scrcopy_and, rop_and, blend_and, scrcopy, srccopy_pre, blend_pre
   18   synchronize enable
   19   endian: 1 big, 0 little
   21:20 dither mode
   23    single step enable
   24    patch status: 0 invalid, 1 valid
   25    context_surface 0: 1 valid
   26    context surface 1: 1 valid
   27    context pattern: 1 valid
   28    context rop: 1 valid
   29,30 context beta, beta4
   entry[1]
   7:0   mono format
   15:8  color format
   31:16 notify instance address
   entry[2]
   15:0  dma 0 instance address
   31:16 dma 1 instance address
   entry[3]
   dma method traps

   NV40:
   No idea what the exact format is. Here's what can be deduced:

   entry[0]:
   11:0  class  (maybe uses more bits here?)
   17    user clip enable
   21:19 patch config
   25    patch status valid ?
   entry[1]:
   15:0  DMA notifier  (maybe 20:0)
   entry[2]:
   15:0  DMA 0 instance (maybe 20:0)
   24    big endian
   entry[3]:
   15:0  DMA 1 instance (maybe 20:0)
   entry[4]:
   entry[5]:
   set to 0?
*/
int
nouveau_gpuobj_gr_new(struct nouveau_channel *chan, int class,
		      struct nouveau_gpuobj **gpuobj)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int ret;

	NV_DEBUG(dev, "ch%d class=0x%04x\n", chan->id, class);

	ret = nouveau_gpuobj_new(dev, chan,
				 nouveau_gpuobj_class_instmem_size(dev, class),
				 16,
				 NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE,
				 gpuobj);
	if (ret) {
		NV_ERROR(dev, "Error creating gpuobj: %d\n", ret);
		return ret;
	}

	if (dev_priv->card_type >= NV_50) {
		nv_wo32(*gpuobj,  0, class);
		nv_wo32(*gpuobj, 20, 0x00010000);
	} else {
		switch (class) {
		case NV_CLASS_NULL:
			nv_wo32(*gpuobj, 0, 0x00001030);
			nv_wo32(*gpuobj, 4, 0xFFFFFFFF);
			break;
		default:
			if (dev_priv->card_type >= NV_40) {
				nv_wo32(*gpuobj, 0, class);
#ifdef __BIG_ENDIAN
				nv_wo32(*gpuobj, 8, 0x01000000);
#endif
			} else {
#ifdef __BIG_ENDIAN
				nv_wo32(*gpuobj, 0, class | 0x00080000);
#else
				nv_wo32(*gpuobj, 0, class);
#endif
			}
		}
	}
	dev_priv->engine.instmem.flush(dev);

	(*gpuobj)->engine = NVOBJ_ENGINE_GR;
	(*gpuobj)->class  = class;
	return 0;
}
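
/* A sketch of the path the grobj_alloc ioctl below takes for a hardware
 * class (grclass and handle here stand in for ioctl-supplied values):
 *
 *	struct nouveau_gpuobj *gr = NULL;
 *	int ret;
 *
 *	ret = nouveau_gpuobj_gr_new(chan, grclass, &gr);
 *	if (ret == 0) {
 *		ret = nouveau_ramht_insert(chan, handle, gr);
 *		nouveau_gpuobj_ref(NULL, &gr);
 *	}
 */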

int
nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class,
		      struct nouveau_gpuobj **gpuobj_ret)
{
	struct drm_nouveau_private *dev_priv;
	struct nouveau_gpuobj *gpuobj;

	if (!chan || !gpuobj_ret || *gpuobj_ret != NULL)
		return -EINVAL;
	dev_priv = chan->dev->dev_private;

	gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
	if (!gpuobj)
		return -ENOMEM;
	gpuobj->dev = chan->dev;
	gpuobj->engine = NVOBJ_ENGINE_SW;
	gpuobj->class = class;
	gpuobj->refcount = 1;
	gpuobj->cinst = 0x40;

	list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
	*gpuobj_ret = gpuobj;
	return 0;
}

static int
nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t size;
	uint32_t base;
	int ret;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	/* Base amount for object storage (4KiB enough?) */
	size = 0x1000;
	base = 0;

	/* PGRAPH context */
	size += dev_priv->engine.graph.grctx_size;

	if (dev_priv->card_type == NV_50) {
		/* Various fixed table thingos */
		size += 0x1400; /* mostly unknown stuff */
		size += 0x4000; /* vm pd */
		base  = 0x6000;
		/* RAMHT, not sure about setting size yet, 32KiB to be safe */
		size += 0x8000;
		/* RAMFC */
		size += 0x1000;
	}

	ret = nouveau_gpuobj_new(dev, NULL, size, 0x1000, 0, &chan->ramin);
	if (ret) {
		NV_ERROR(dev, "Error allocating channel PRAMIN: %d\n", ret);
		return ret;
	}

	ret = drm_mm_init(&chan->ramin_heap, base, size);
	if (ret) {
		NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret);
		nouveau_gpuobj_ref(NULL, &chan->ramin);
		return ret;
	}

	return 0;
}

int
nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
			    uint32_t vram_h, uint32_t tt_h)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
	struct nouveau_gpuobj *vram = NULL, *tt = NULL;
	int ret, i;

	NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);

	/* Allocate a chunk of memory for per-channel object storage */
	ret = nouveau_gpuobj_channel_init_pramin(chan);
	if (ret) {
		NV_ERROR(dev, "init pramin\n");
		return ret;
	}

	/* NV50 VM
	 *  - Allocate per-channel page-directory
	 *  - Map GART and VRAM into the channel's address space at the
	 *    locations determined during init.
	 */
	if (dev_priv->card_type >= NV_50) {
		uint32_t vm_offset, pde;

		vm_offset = (dev_priv->chipset & 0xf0) == 0x50 ? 0x1400 : 0x200;
		vm_offset += chan->ramin->im_pramin->start;

		ret = nouveau_gpuobj_new_fake(dev, vm_offset, ~0, 0x4000,
					      0, &chan->vm_pd);
		if (ret)
			return ret;
		for (i = 0; i < 0x4000; i += 8) {
			nv_wo32(chan->vm_pd, i + 0, 0x00000000);
			nv_wo32(chan->vm_pd, i + 4, 0xdeadcafe);
		}

		/* each 8-byte PDE covers a 512MiB chunk of the address
		 * space, hence the stride below */
		nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma,
				   &chan->vm_gart_pt);
		pde = (dev_priv->vm_gart_base / (512*1024*1024)) * 8;
		nv_wo32(chan->vm_pd, pde + 0, chan->vm_gart_pt->vinst | 3);
		nv_wo32(chan->vm_pd, pde + 4, 0x00000000);

		pde = (dev_priv->vm_vram_base / (512*1024*1024)) * 8;
		for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) {
			nouveau_gpuobj_ref(dev_priv->vm_vram_pt[i],
					   &chan->vm_vram_pt[i]);

			nv_wo32(chan->vm_pd, pde + 0,
				chan->vm_vram_pt[i]->vinst | 0x61);
			nv_wo32(chan->vm_pd, pde + 4, 0x00000000);
			pde += 8;
		}

		instmem->flush(dev);
	}

	/* RAMHT */
	if (dev_priv->card_type < NV_50) {
		nouveau_ramht_ref(dev_priv->ramht, &chan->ramht, NULL);
	} else {
		struct nouveau_gpuobj *ramht = NULL;

		ret = nouveau_gpuobj_new(dev, chan, 0x8000, 16,
					 NVOBJ_FLAG_ZERO_ALLOC, &ramht);
		if (ret)
			return ret;

		ret = nouveau_ramht_new(dev, ramht, &chan->ramht);
		nouveau_gpuobj_ref(NULL, &ramht);
		if (ret)
			return ret;
	}

	/* VRAM ctxdma */
	if (dev_priv->card_type >= NV_50) {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     0, dev_priv->vm_end,
					     NV_DMA_ACCESS_RW,
					     NV_DMA_TARGET_AGP, &vram);
		if (ret) {
			NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
			return ret;
		}
	} else {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     0, dev_priv->fb_available_size,
					     NV_DMA_ACCESS_RW,
					     NV_DMA_TARGET_VIDMEM, &vram);
		if (ret) {
			NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
			return ret;
		}
	}

	ret = nouveau_ramht_insert(chan, vram_h, vram);
	nouveau_gpuobj_ref(NULL, &vram);
	if (ret) {
		NV_ERROR(dev, "Error adding VRAM ctxdma to RAMHT: %d\n", ret);
		return ret;
	}

	/* TT memory ctxdma */
	if (dev_priv->card_type >= NV_50) {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     0, dev_priv->vm_end,
					     NV_DMA_ACCESS_RW,
					     NV_DMA_TARGET_AGP, &tt);
		if (ret) {
			NV_ERROR(dev, "Error creating TT ctxdma: %d\n", ret);
			return ret;
		}
	} else
	if (dev_priv->gart_info.type != NOUVEAU_GART_NONE) {
		ret = nouveau_gpuobj_gart_dma_new(chan, 0,
						  dev_priv->gart_info.aper_size,
						  NV_DMA_ACCESS_RW, &tt, NULL);
	} else {
		NV_ERROR(dev, "Invalid GART type %d\n", dev_priv->gart_info.type);
		ret = -EINVAL;
	}

	if (ret) {
		NV_ERROR(dev, "Error creating TT ctxdma: %d\n", ret);
		return ret;
	}

	ret = nouveau_ramht_insert(chan, tt_h, tt);
	nouveau_gpuobj_ref(NULL, &tt);
	if (ret) {
		NV_ERROR(dev, "Error adding TT ctxdma to RAMHT: %d\n", ret);
		return ret;
	}

	return 0;
}

void
nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct drm_device *dev = chan->dev;
	int i;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	if (!chan->ramht)
		return;

	nouveau_ramht_ref(NULL, &chan->ramht, chan);

	nouveau_gpuobj_ref(NULL, &chan->vm_pd);
	nouveau_gpuobj_ref(NULL, &chan->vm_gart_pt);
	for (i = 0; i < dev_priv->vm_vram_pt_nr; i++)
		nouveau_gpuobj_ref(NULL, &chan->vm_vram_pt[i]);

	if (chan->ramin_heap.free_stack.next)
		drm_mm_takedown(&chan->ramin_heap);
	nouveau_gpuobj_ref(NULL, &chan->ramin);
}

int
nouveau_gpuobj_suspend(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj;
	int i;

	if (dev_priv->card_type < NV_50) {
		dev_priv->susres.ramin_copy = vmalloc(dev_priv->ramin_rsvd_vram);
		if (!dev_priv->susres.ramin_copy)
			return -ENOMEM;

		for (i = 0; i < dev_priv->ramin_rsvd_vram; i += 4)
			dev_priv->susres.ramin_copy[i/4] = nv_ri32(dev, i);
		return 0;
	}

	list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
		if (!gpuobj->im_backing || (gpuobj->flags & NVOBJ_FLAG_FAKE))
			continue;

		gpuobj->im_backing_suspend = vmalloc(gpuobj->im_pramin->size);
		if (!gpuobj->im_backing_suspend) {
			nouveau_gpuobj_resume(dev);
			return -ENOMEM;
		}

		for (i = 0; i < gpuobj->im_pramin->size; i += 4)
			gpuobj->im_backing_suspend[i/4] = nv_ro32(gpuobj, i);
	}

	return 0;
}

void
nouveau_gpuobj_suspend_cleanup(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj;

	if (dev_priv->card_type < NV_50) {
		vfree(dev_priv->susres.ramin_copy);
		dev_priv->susres.ramin_copy = NULL;
		return;
	}

	list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
		if (!gpuobj->im_backing_suspend)
			continue;

		vfree(gpuobj->im_backing_suspend);
		gpuobj->im_backing_suspend = NULL;
	}
}

void
nouveau_gpuobj_resume(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj;
	int i;

	if (dev_priv->card_type < NV_50) {
		for (i = 0; i < dev_priv->ramin_rsvd_vram; i += 4)
			nv_wi32(dev, i, dev_priv->susres.ramin_copy[i/4]);
		nouveau_gpuobj_suspend_cleanup(dev);
		return;
	}

	list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
		if (!gpuobj->im_backing_suspend)
			continue;

		for (i = 0; i < gpuobj->im_pramin->size; i += 4)
			nv_wo32(gpuobj, i, gpuobj->im_backing_suspend[i/4]);
		dev_priv->engine.instmem.flush(dev);
	}

	nouveau_gpuobj_suspend_cleanup(dev);
}

int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_nouveau_grobj_alloc *init = data;
	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
	struct nouveau_pgraph_object_class *grc;
	struct nouveau_gpuobj *gr = NULL;
	struct nouveau_channel *chan;
	int ret;

	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(init->channel, file_priv, chan);

	if (init->handle == ~0)
		return -EINVAL;

	grc = pgraph->grclass;
	while (grc->id) {
		if (grc->id == init->class)
			break;
		grc++;
	}

	if (!grc->id) {
		NV_ERROR(dev, "Illegal object class: 0x%x\n", init->class);
		return -EPERM;
	}

	if (nouveau_ramht_find(chan, init->handle))
		return -EEXIST;

	if (!grc->software)
		ret = nouveau_gpuobj_gr_new(chan, grc->id, &gr);
	else
		ret = nouveau_gpuobj_sw_new(chan, grc->id, &gr);
	if (ret) {
		NV_ERROR(dev, "Error creating object: %d (%d/0x%08x)\n",
			 ret, init->channel, init->handle);
		return ret;
	}

	ret = nouveau_ramht_insert(chan, init->handle, gr);
	nouveau_gpuobj_ref(NULL, &gr);
	if (ret) {
		NV_ERROR(dev, "Error referencing object: %d (%d/0x%08x)\n",
			 ret, init->channel, init->handle);
		return ret;
	}

	return 0;
}

int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_nouveau_gpuobj_free *objfree = data;
	struct nouveau_gpuobj *gpuobj;
	struct nouveau_channel *chan;

	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(objfree->channel, file_priv, chan);

	gpuobj = nouveau_ramht_find(chan, objfree->handle);
	if (!gpuobj)
		return -ENOENT;

	nouveau_ramht_remove(chan, objfree->handle);
	return 0;
}

u32
nv_ro32(struct nouveau_gpuobj *gpuobj, u32 offset)
{
	return nv_ri32(gpuobj->dev, gpuobj->pinst + offset);
}

void
nv_wo32(struct nouveau_gpuobj *gpuobj, u32 offset, u32 val)
{
	nv_wi32(gpuobj->dev, gpuobj->pinst + offset, val);
}