/*
 * Copyright (C) 2006 Ben Skeggs.
 *
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

/*
 * Authors:
 *   Ben Skeggs <darktama@iinet.net.au>
 */

#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nouveau_ramht.h"

/* NVidia uses context objects to drive drawing operations.

   Context objects can be selected into 8 subchannels in the FIFO,
   and then used via DMA command buffers.

   A context object is referenced by a user-defined handle (CARD32). The HW
   looks up graphics objects in a hash table in the instance RAM.

   An entry in the hash table consists of 2 CARD32s. The first CARD32 holds
   the handle, the second a bitfield containing the address of the object
   in instance RAM.

   The format of the second CARD32 seems to be:

   NV4 to NV30:

   15: 0  instance_addr >> 4
   17:16  engine (here we use 1 = graphics)
   28:24  channel id (here we use 0)
   31     valid (use 1)

   NV40:

   15: 0  instance_addr >> 4   (maybe 19-0)
   21:20  engine (here we use 1 = graphics)
   I'm unsure about the other bits, but using 0 seems to work.

   The key into the hash table depends on the object handle and channel id,
   and is computed as sketched below.
*/
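
/* A minimal sketch of that hash, for illustration only -- the name and
 * parameters below are assumptions, not part of this file; the
 * authoritative implementation lives with the RAMHT code in
 * nouveau_ramht.c.  The handle is folded down by XOR in ramht_bits-sized
 * chunks, pre-NV50 chips mix the channel id into the high bits, and the
 * result is shifted left by 3 because each hash table entry is 8 bytes.
 */
static inline uint32_t
example_ramht_hash(uint32_t handle, int channel, int ramht_bits, bool nv50)
{
	uint32_t hash = 0;

	while (handle) {
		hash ^= handle & ((1 << ramht_bits) - 1);
		handle >>= ramht_bits;
	}

	if (!nv50)
		hash ^= channel << (ramht_bits - 4);

	return hash << 3; /* byte offset of the candidate RAMHT entry */
}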

int
nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
		   uint32_t size, int align, uint32_t flags,
		   struct nouveau_gpuobj **gpuobj_ret)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine = &dev_priv->engine;
	struct nouveau_gpuobj *gpuobj;
	struct drm_mm *pramin = NULL;
	int ret;

	NV_DEBUG(dev, "ch%d size=%u align=%d flags=0x%08x\n",
		 chan ? chan->id : -1, size, align, flags);

	if (!dev_priv || !gpuobj_ret || *gpuobj_ret != NULL)
		return -EINVAL;

	gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
	if (!gpuobj)
		return -ENOMEM;
	NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
	gpuobj->dev = dev;
	gpuobj->flags = flags;
	gpuobj->im_channel = chan;

	list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);

	/* Choose between global instmem heap, and per-channel private
	 * instmem heap.  On <NV50 allow requests for private instmem
	 * to be satisfied from global heap if no per-channel area
	 * available.
	 */
	if (chan) {
		NV_DEBUG(dev, "channel heap\n");
		pramin = &chan->ramin_heap;
	} else {
		NV_DEBUG(dev, "global heap\n");
		pramin = &dev_priv->ramin_heap;

		ret = engine->instmem.populate(dev, gpuobj, &size);
		if (ret) {
			nouveau_gpuobj_del(dev, &gpuobj);
			return ret;
		}
	}

	/* Allocate a chunk of the PRAMIN aperture */
	gpuobj->im_pramin = drm_mm_search_free(pramin, size, align, 0);
	if (gpuobj->im_pramin)
		gpuobj->im_pramin = drm_mm_get_block(gpuobj->im_pramin, size, align);

	if (!gpuobj->im_pramin) {
		nouveau_gpuobj_del(dev, &gpuobj);
		return -ENOMEM;
	}

	if (!chan) {
		ret = engine->instmem.bind(dev, gpuobj);
		if (ret) {
			nouveau_gpuobj_del(dev, &gpuobj);
			return ret;
		}
	}

	/* calculate the various different addresses for the object */
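	/* (Best-guess glossary, inferred from the code below: pinst = byte
	 * offset into the global PRAMIN aperture; cinst = offset relative
	 * to the owning channel's instmem block; vinst = VRAM address of
	 * the backing; 0xdeadbeef marks a channel-relative address that
	 * should never be used.)
	 */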
	if (chan) {
		gpuobj->pinst = gpuobj->im_pramin->start +
				chan->ramin->gpuobj->im_pramin->start;
		if (dev_priv->card_type < NV_50) {
			gpuobj->cinst = gpuobj->pinst;
		} else {
			gpuobj->cinst = gpuobj->im_pramin->start;
			gpuobj->vinst = gpuobj->im_pramin->start +
					chan->ramin->gpuobj->im_backing_start;
		}
	} else {
		gpuobj->pinst = gpuobj->im_pramin->start;
		gpuobj->cinst = 0xdeadbeef;
		gpuobj->vinst = gpuobj->im_backing_start;
	}

	if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
		int i;

		for (i = 0; i < gpuobj->im_pramin->size; i += 4)
			nv_wo32(gpuobj, i, 0);
		engine->instmem.flush(dev);
	}

	*gpuobj_ret = gpuobj;
	return 0;
}

int
nouveau_gpuobj_early_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	NV_DEBUG(dev, "\n");

	INIT_LIST_HEAD(&dev_priv->gpuobj_list);

	return 0;
}

int
nouveau_gpuobj_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int ret;

	NV_DEBUG(dev, "\n");

	if (dev_priv->card_type < NV_50) {
		ret = nouveau_gpuobj_new_fake(dev,
			dev_priv->ramht_offset, ~0, dev_priv->ramht_size,
			NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ALLOW_NO_REFS,
			&dev_priv->ramht, NULL);
		if (ret)
			return ret;
	}

	return 0;
}

void
nouveau_gpuobj_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	NV_DEBUG(dev, "\n");

	nouveau_gpuobj_del(dev, &dev_priv->ramht);
}

void
nouveau_gpuobj_late_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = NULL;
	struct list_head *entry, *tmp;

	NV_DEBUG(dev, "\n");

	list_for_each_safe(entry, tmp, &dev_priv->gpuobj_list) {
		gpuobj = list_entry(entry, struct nouveau_gpuobj, list);

		NV_ERROR(dev, "gpuobj %p still exists at takedown, refs=%d\n",
			 gpuobj, gpuobj->refcount);
		gpuobj->refcount = 0;
		nouveau_gpuobj_del(dev, &gpuobj);
	}
}

int
nouveau_gpuobj_del(struct drm_device *dev, struct nouveau_gpuobj **pgpuobj)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine = &dev_priv->engine;
	struct nouveau_gpuobj *gpuobj;
	int i;

	NV_DEBUG(dev, "gpuobj %p\n", pgpuobj ? *pgpuobj : NULL);

	if (!dev_priv || !pgpuobj || !(*pgpuobj))
		return -EINVAL;
	gpuobj = *pgpuobj;

	if (gpuobj->refcount != 0) {
		NV_ERROR(dev, "gpuobj refcount is %d\n", gpuobj->refcount);
		return -EINVAL;
	}

	if (gpuobj->im_pramin && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) {
		for (i = 0; i < gpuobj->im_pramin->size; i += 4)
			nv_wo32(gpuobj, i, 0);
		engine->instmem.flush(dev);
	}

	if (gpuobj->dtor)
		gpuobj->dtor(dev, gpuobj);

	if (gpuobj->im_backing && !(gpuobj->flags & NVOBJ_FLAG_FAKE))
		engine->instmem.clear(dev, gpuobj);

	if (gpuobj->im_pramin) {
		if (gpuobj->flags & NVOBJ_FLAG_FAKE)
			kfree(gpuobj->im_pramin);
		else
			drm_mm_put_block(gpuobj->im_pramin);
	}

	list_del(&gpuobj->list);

	*pgpuobj = NULL;
	kfree(gpuobj);
	return 0;
}

static int
nouveau_gpuobj_instance_get(struct drm_device *dev,
			    struct nouveau_channel *chan,
			    struct nouveau_gpuobj *gpuobj, uint32_t *inst)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *cpramin;

	/* <NV50 use PRAMIN address everywhere */
	if (dev_priv->card_type < NV_50) {
		*inst = gpuobj->im_pramin->start;
		if (gpuobj->im_channel) {
			cpramin = gpuobj->im_channel->ramin->gpuobj;
			*inst += cpramin->im_pramin->start;
		}
		return 0;
	}

	/* NV50 channel-local instance */
	if (chan) {
		*inst = gpuobj->im_pramin->start;
		return 0;
	}

	/* NV50 global (VRAM) instance, from the global heap... */
	if (!gpuobj->im_channel) {
		if (!gpuobj->im_backing) {
			NV_ERROR(dev, "AII, no VRAM backing gpuobj\n");
			return -EINVAL;
		}
		*inst = gpuobj->im_backing_start;
		return 0;
	}

	/* ...or from a channel's local heap */
	cpramin = gpuobj->im_channel->ramin->gpuobj;
	*inst = cpramin->im_backing_start + gpuobj->im_pramin->start;
	return 0;
}

int
nouveau_gpuobj_ref_add(struct drm_device *dev, struct nouveau_channel *chan,
		       uint32_t handle, struct nouveau_gpuobj *gpuobj,
		       struct nouveau_gpuobj_ref **ref_ret)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj_ref *ref;
	uint32_t instance;
	int ret;

	NV_DEBUG(dev, "ch%d h=0x%08x gpuobj=%p\n",
		 chan ? chan->id : -1, handle, gpuobj);

	if (!dev_priv || !gpuobj || (ref_ret && *ref_ret != NULL))
		return -EINVAL;

	if (!chan && !ref_ret)
		return -EINVAL;

	if (gpuobj->engine == NVOBJ_ENGINE_SW && !gpuobj->im_pramin) {
		/* sw object */
		instance = 0x40;
	} else {
		ret = nouveau_gpuobj_instance_get(dev, chan, gpuobj, &instance);
		if (ret)
			return ret;
	}

	ref = kzalloc(sizeof(*ref), GFP_KERNEL);
	if (!ref)
		return -ENOMEM;
	INIT_LIST_HEAD(&ref->list);
	ref->gpuobj = gpuobj;
	ref->channel = chan;
	ref->instance = instance;

	if (!ref_ret) {
		ref->handle = handle;

		ret = nouveau_ramht_insert(dev, ref);
		if (ret) {
			kfree(ref);
			return ret;
		}
	} else {
		ref->handle = ~0;
		*ref_ret = ref;
	}

	ref->gpuobj->refcount++;
	return 0;
}

int nouveau_gpuobj_ref_del(struct drm_device *dev, struct nouveau_gpuobj_ref **pref)
{
	struct nouveau_gpuobj_ref *ref;

	NV_DEBUG(dev, "ref %p\n", pref ? *pref : NULL);

	if (!dev || !pref || *pref == NULL)
		return -EINVAL;
	ref = *pref;

	if (ref->handle != ~0)
		nouveau_ramht_remove(dev, ref);

	if (ref->gpuobj) {
		ref->gpuobj->refcount--;

		if (ref->gpuobj->refcount == 0) {
			if (!(ref->gpuobj->flags & NVOBJ_FLAG_ALLOW_NO_REFS))
				nouveau_gpuobj_del(dev, &ref->gpuobj);
		}
	}

	*pref = NULL;
	kfree(ref);
	return 0;
}

int
nouveau_gpuobj_new_ref(struct drm_device *dev,
		       struct nouveau_channel *oc, struct nouveau_channel *rc,
		       uint32_t handle, uint32_t size, int align,
		       uint32_t flags, struct nouveau_gpuobj_ref **ref)
{
	struct nouveau_gpuobj *gpuobj = NULL;
	int ret;

	ret = nouveau_gpuobj_new(dev, oc, size, align, flags, &gpuobj);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_ref_add(dev, rc, handle, gpuobj, ref);
	if (ret) {
		nouveau_gpuobj_del(dev, &gpuobj);
		return ret;
	}

	return 0;
}

int
nouveau_gpuobj_ref_find(struct nouveau_channel *chan, uint32_t handle,
			struct nouveau_gpuobj_ref **ref_ret)
{
	struct nouveau_gpuobj_ref *ref;
	struct list_head *entry, *tmp;

	list_for_each_safe(entry, tmp, &chan->ramht_refs) {
		ref = list_entry(entry, struct nouveau_gpuobj_ref, list);

		if (ref->handle == handle) {
			if (ref_ret)
				*ref_ret = ref;
			return 0;
		}
	}

	return -EINVAL;
}

int
nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t p_offset,
			uint32_t b_offset, uint32_t size,
			uint32_t flags, struct nouveau_gpuobj **pgpuobj,
			struct nouveau_gpuobj_ref **pref)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = NULL;
	int i;

	NV_DEBUG(dev,
		 "p_offset=0x%08x b_offset=0x%08x size=0x%08x flags=0x%08x\n",
		 p_offset, b_offset, size, flags);

	gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
	if (!gpuobj)
		return -ENOMEM;
	NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
	gpuobj->dev = dev;
	gpuobj->im_channel = NULL;
	gpuobj->flags = flags | NVOBJ_FLAG_FAKE;

	list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);

	if (p_offset != ~0) {
		gpuobj->im_pramin = kzalloc(sizeof(struct drm_mm_node),
					    GFP_KERNEL);
		if (!gpuobj->im_pramin) {
			nouveau_gpuobj_del(dev, &gpuobj);
			return -ENOMEM;
		}
		gpuobj->im_pramin->start = p_offset;
		gpuobj->im_pramin->size = size;
	}

	if (b_offset != ~0) {
		gpuobj->im_backing = (struct nouveau_bo *)-1;
		gpuobj->im_backing_start = b_offset;
	}

	/* p_offset == ~0 leaves im_pramin NULL, so guard the dereference */
	if (gpuobj->im_pramin)
		gpuobj->pinst = gpuobj->im_pramin->start;
	gpuobj->cinst = 0xdeadbeef;
	gpuobj->vinst = gpuobj->im_backing_start;

	if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
		for (i = 0; i < gpuobj->im_pramin->size; i += 4)
			nv_wo32(gpuobj, i, 0);
		dev_priv->engine.instmem.flush(dev);
	}

	if (pref) {
		i = nouveau_gpuobj_ref_add(dev, NULL, 0, gpuobj, pref);
		if (i) {
			nouveau_gpuobj_del(dev, &gpuobj);
			return i;
		}
	}

	if (pgpuobj)
		*pgpuobj = gpuobj;
	return 0;
}

static uint32_t
nouveau_gpuobj_class_instmem_size(struct drm_device *dev, int class)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	/*XXX: dodgy hack for now */
	if (dev_priv->card_type >= NV_50)
		return 24;
	if (dev_priv->card_type >= NV_40)
		return 32;
	return 16;
}

/*
   DMA objects are used to reference a piece of memory in the
   framebuffer, PCI or AGP address space. Each object is 16 bytes big
   and looks as follows:

   entry[0]
   11:0  class (seems like I can always use 0 here)
   12    page table present?
   13    page entry linear?
   15:14 access: 0 rw, 1 ro, 2 wo
   17:16 target: 0 NV memory, 1 NV memory tiled, 2 PCI, 3 AGP
   31:20 dma adjust (bits 0-11 of the address)
   entry[1]
   dma limit (size of transfer)
   entry[X]
   1     0 readonly, 1 readwrite
   31:12 dma frame address of the page (bits 12-31 of the address)
   entry[N]
   page table terminator: nvidia uses the same value as the first pte,
   rivatv uses 0xffffffff

   Non-linear page tables need a list of frame addresses afterwards;
   the rivatv project has some info on this.

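   As a worked example (values assumed purely for illustration): a linear,
   read/write DMA object of class 0x3d covering 64KiB of NV memory from
   byte offset 0x1001 would be encoded as:

     entry[0] = 0x0010303d  (adjust=0x001, target=0 NV, access=0 rw,
                             linear=1, present=1, class=0x3d)
     entry[1] = 0x0000ffff  (dma limit = size - 1)
     entry[2] = 0x00001002  (frame 0x1000 | readwrite)
     entry[3] = 0x00001002  (terminator, repeating the first pte)
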
   The method below creates a DMA object in instance RAM and returns a handle
   to it that can be used to set up context objects.
*/
int
nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class,
		       uint64_t offset, uint64_t size, int access,
		       int target, struct nouveau_gpuobj **gpuobj)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
	int ret;

	NV_DEBUG(dev, "ch%d class=0x%04x offset=0x%llx size=0x%llx\n",
		 chan->id, class, offset, size);
	NV_DEBUG(dev, "access=%d target=%d\n", access, target);

	switch (target) {
	case NV_DMA_TARGET_AGP:
		offset += dev_priv->gart_info.aper_base;
		break;
	default:
		break;
	}

	ret = nouveau_gpuobj_new(dev, chan,
				 nouveau_gpuobj_class_instmem_size(dev, class),
				 16, NVOBJ_FLAG_ZERO_ALLOC |
				 NVOBJ_FLAG_ZERO_FREE, gpuobj);
	if (ret) {
		NV_ERROR(dev, "Error creating gpuobj: %d\n", ret);
		return ret;
	}

	if (dev_priv->card_type < NV_50) {
		uint32_t frame, adjust, pte_flags = 0;

		if (access != NV_DMA_ACCESS_RO)
			pte_flags |= (1<<1);
		adjust = offset & 0x00000fff;
		frame = offset & ~0x00000fff;

		nv_wo32(*gpuobj, 0, ((1<<12) | (1<<13) | (adjust << 20) |
				     (access << 14) | (target << 16) |
				     class));
		nv_wo32(*gpuobj, 4, size - 1);
		nv_wo32(*gpuobj, 8, frame | pte_flags);
		nv_wo32(*gpuobj, 12, frame | pte_flags);
	} else {
		uint64_t limit = offset + size - 1;
		uint32_t flags0, flags5;

		if (target == NV_DMA_TARGET_VIDMEM) {
			flags0 = 0x00190000;
			flags5 = 0x00010000;
		} else {
			flags0 = 0x7fc00000;
			flags5 = 0x00080000;
		}

		nv_wo32(*gpuobj, 0, flags0 | class);
		nv_wo32(*gpuobj, 4, lower_32_bits(limit));
		nv_wo32(*gpuobj, 8, lower_32_bits(offset));
		nv_wo32(*gpuobj, 12, ((upper_32_bits(limit) & 0xff) << 24) |
				     (upper_32_bits(offset) & 0xff));
		nv_wo32(*gpuobj, 20, flags5);
	}

	instmem->flush(dev);

	(*gpuobj)->engine = NVOBJ_ENGINE_SW;
	(*gpuobj)->class = class;
	return 0;
}

int
nouveau_gpuobj_gart_dma_new(struct nouveau_channel *chan,
			    uint64_t offset, uint64_t size, int access,
			    struct nouveau_gpuobj **gpuobj,
			    uint32_t *o_ret)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int ret;

	if (dev_priv->gart_info.type == NOUVEAU_GART_AGP ||
	    (dev_priv->card_type >= NV_50 &&
	     dev_priv->gart_info.type == NOUVEAU_GART_SGDMA)) {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     offset + dev_priv->vm_gart_base,
					     size, access, NV_DMA_TARGET_AGP,
					     gpuobj);
		if (o_ret)
			*o_ret = 0;
	} else
	if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA) {
		*gpuobj = dev_priv->gart_info.sg_ctxdma;
		if (offset & ~0xffffffffULL) {
			NV_ERROR(dev, "obj offset exceeds 32-bits\n");
			return -EINVAL;
		}
		if (o_ret)
			*o_ret = (uint32_t)offset;
		ret = (*gpuobj != NULL) ? 0 : -EINVAL;
	} else {
		NV_ERROR(dev, "Invalid GART type %d\n", dev_priv->gart_info.type);
		return -EINVAL;
	}

	return ret;
}

/* Context objects in the instance RAM have the following structure.
 * On NV40 they are 32 bytes long, on NV30 and smaller 16 bytes.

   NV4 - NV30:

   entry[0]
   11:0 class
   12   chroma key enable
   13   user clip enable
   14   swizzle enable
   17:15 patch config:
       scrcopy_and, rop_and, blend_and, scrcopy, srccopy_pre, blend_pre
   18   synchronize enable
   19   endian: 1 big, 0 little
   21:20 dither mode
   23    single step enable
   24    patch status: 0 invalid, 1 valid
   25    context_surface 0: 1 valid
   26    context surface 1: 1 valid
   27    context pattern: 1 valid
   28    context rop: 1 valid
   29,30 context beta, beta4
   entry[1]
   7:0   mono format
   15:8  color format
   31:16 notify instance address
   entry[2]
   15:0  dma 0 instance address
   31:16 dma 1 instance address
   entry[3]
   dma method traps

   NV40:
   No idea what the exact format is. Here's what can be deduced:

   entry[0]:
   11:0  class  (maybe uses more bits here?)
   17    user clip enable
   21:19 patch config
   25    patch status valid ?
   entry[1]:
   15:0  DMA notifier  (maybe 20:0)
   entry[2]:
   15:0  DMA 0 instance  (maybe 20:0)
   24    big endian
   entry[3]:
   15:0  DMA 1 instance  (maybe 20:0)
   entry[4]:
   entry[5]:
   set to 0?
*/
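/* A worked instance of the NV4-NV30 layout above (values assumed for
 * illustration): class 0x042 with synchronize enable (bit 18) and big
 * endian (bit 19) set gives entry[0] = 0x000c0042.  The <NV40 path below
 * only ever sets the endian flag, i.e. class | 0x00080000 on big-endian
 * hosts.
 */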
int
nouveau_gpuobj_gr_new(struct nouveau_channel *chan, int class,
		      struct nouveau_gpuobj **gpuobj)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int ret;

	NV_DEBUG(dev, "ch%d class=0x%04x\n", chan->id, class);

	ret = nouveau_gpuobj_new(dev, chan,
				 nouveau_gpuobj_class_instmem_size(dev, class),
				 16,
				 NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE,
				 gpuobj);
	if (ret) {
		NV_ERROR(dev, "Error creating gpuobj: %d\n", ret);
		return ret;
	}

	if (dev_priv->card_type >= NV_50) {
		nv_wo32(*gpuobj, 0, class);
		nv_wo32(*gpuobj, 20, 0x00010000);
	} else {
		switch (class) {
		case NV_CLASS_NULL:
			nv_wo32(*gpuobj, 0, 0x00001030);
			nv_wo32(*gpuobj, 4, 0xFFFFFFFF);
			break;
		default:
			if (dev_priv->card_type >= NV_40) {
				nv_wo32(*gpuobj, 0, class);
#ifdef __BIG_ENDIAN
				nv_wo32(*gpuobj, 8, 0x01000000);
#endif
			} else {
#ifdef __BIG_ENDIAN
				nv_wo32(*gpuobj, 0, class | 0x00080000);
#else
				nv_wo32(*gpuobj, 0, class);
#endif
			}
		}
	}
	dev_priv->engine.instmem.flush(dev);

	(*gpuobj)->engine = NVOBJ_ENGINE_GR;
	(*gpuobj)->class = class;
	return 0;
}

int
nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class,
		      struct nouveau_gpuobj **gpuobj_ret)
{
	struct drm_nouveau_private *dev_priv;
	struct nouveau_gpuobj *gpuobj;

	if (!chan || !gpuobj_ret || *gpuobj_ret != NULL)
		return -EINVAL;
	dev_priv = chan->dev->dev_private;

	gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
	if (!gpuobj)
		return -ENOMEM;
	gpuobj->engine = NVOBJ_ENGINE_SW;
	gpuobj->class = class;

	list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
	*gpuobj_ret = gpuobj;
	return 0;
}

static int
nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *pramin = NULL;
	uint32_t size;
	uint32_t base;
	int ret;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	/* Base amount for object storage (4KiB enough?) */
	size = 0x1000;
	base = 0;

	/* PGRAPH context */
	size += dev_priv->engine.graph.grctx_size;

	if (dev_priv->card_type == NV_50) {
		/* Various fixed table thingos */
		size += 0x1400; /* mostly unknown stuff */
		size += 0x4000; /* vm pd */
		base = 0x6000;
		/* RAMHT, not sure about setting size yet, 32KiB to be safe */
		size += 0x8000;
		/* RAMFC */
		size += 0x1000;
	}

	ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, size, 0x1000, 0,
				     &chan->ramin);
	if (ret) {
		NV_ERROR(dev, "Error allocating channel PRAMIN: %d\n", ret);
		return ret;
	}
	pramin = chan->ramin->gpuobj;

	ret = drm_mm_init(&chan->ramin_heap, base, size);
	if (ret) {
		NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret);
		nouveau_gpuobj_ref_del(dev, &chan->ramin);
		return ret;
	}

	return 0;
}

int
nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
			    uint32_t vram_h, uint32_t tt_h)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
	struct nouveau_gpuobj *vram = NULL, *tt = NULL;
	int ret, i;

	INIT_LIST_HEAD(&chan->ramht_refs);

	NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);

	/* Allocate a chunk of memory for per-channel object storage */
	ret = nouveau_gpuobj_channel_init_pramin(chan);
	if (ret) {
		NV_ERROR(dev, "init pramin\n");
		return ret;
	}

	/* NV50 VM
	 *  - Allocate per-channel page-directory
	 *  - Map GART and VRAM into the channel's address space at the
	 *    locations determined during init.
	 */
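	/* (Each page-directory entry below is 8 bytes and covers 512MiB of
	 * the channel's virtual address space, hence the
	 * (base / (512*1024*1024)) * 8 indexing.)
	 */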
	if (dev_priv->card_type >= NV_50) {
		uint32_t vm_offset, pde;

		vm_offset = (dev_priv->chipset & 0xf0) == 0x50 ? 0x1400 : 0x200;
		vm_offset += chan->ramin->gpuobj->im_pramin->start;

		ret = nouveau_gpuobj_new_fake(dev, vm_offset, ~0, 0x4000,
					      0, &chan->vm_pd, NULL);
		if (ret)
			return ret;
		for (i = 0; i < 0x4000; i += 8) {
			nv_wo32(chan->vm_pd, i + 0, 0x00000000);
			nv_wo32(chan->vm_pd, i + 4, 0xdeadcafe);
		}

		pde = (dev_priv->vm_gart_base / (512*1024*1024)) * 8;
		ret = nouveau_gpuobj_ref_add(dev, NULL, 0,
					     dev_priv->gart_info.sg_ctxdma,
					     &chan->vm_gart_pt);
		if (ret)
			return ret;
		nv_wo32(chan->vm_pd, pde + 0, chan->vm_gart_pt->instance | 3);
		nv_wo32(chan->vm_pd, pde + 4, 0x00000000);

		pde = (dev_priv->vm_vram_base / (512*1024*1024)) * 8;
		for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) {
			ret = nouveau_gpuobj_ref_add(dev, NULL, 0,
						     dev_priv->vm_vram_pt[i],
						     &chan->vm_vram_pt[i]);
			if (ret)
				return ret;

			nv_wo32(chan->vm_pd, pde + 0,
				chan->vm_vram_pt[i]->instance | 0x61);
			nv_wo32(chan->vm_pd, pde + 4, 0x00000000);
			pde += 8;
		}

		instmem->flush(dev);
	}

	/* RAMHT */
	if (dev_priv->card_type < NV_50) {
		ret = nouveau_gpuobj_ref_add(dev, NULL, 0, dev_priv->ramht,
					     &chan->ramht);
		if (ret)
			return ret;
	} else {
		ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0,
					     0x8000, 16,
					     NVOBJ_FLAG_ZERO_ALLOC,
					     &chan->ramht);
		if (ret)
			return ret;
	}

	/* VRAM ctxdma */
	if (dev_priv->card_type >= NV_50) {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     0, dev_priv->vm_end,
					     NV_DMA_ACCESS_RW,
					     NV_DMA_TARGET_AGP, &vram);
		if (ret) {
			NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
			return ret;
		}
	} else {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     0, dev_priv->fb_available_size,
					     NV_DMA_ACCESS_RW,
					     NV_DMA_TARGET_VIDMEM, &vram);
		if (ret) {
			NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
			return ret;
		}
	}

	ret = nouveau_gpuobj_ref_add(dev, chan, vram_h, vram, NULL);
	if (ret) {
		NV_ERROR(dev, "Error referencing VRAM ctxdma: %d\n", ret);
		return ret;
	}

	/* TT memory ctxdma */
	if (dev_priv->card_type >= NV_50) {
		tt = vram;
	} else
	if (dev_priv->gart_info.type != NOUVEAU_GART_NONE) {
		ret = nouveau_gpuobj_gart_dma_new(chan, 0,
						  dev_priv->gart_info.aper_size,
						  NV_DMA_ACCESS_RW, &tt, NULL);
	} else {
		NV_ERROR(dev, "Invalid GART type %d\n", dev_priv->gart_info.type);
		ret = -EINVAL;
	}

	if (ret) {
		NV_ERROR(dev, "Error creating TT ctxdma: %d\n", ret);
		return ret;
	}

	ret = nouveau_gpuobj_ref_add(dev, chan, tt_h, tt, NULL);
	if (ret) {
		NV_ERROR(dev, "Error referencing TT ctxdma: %d\n", ret);
		return ret;
	}

	return 0;
}

void
nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct drm_device *dev = chan->dev;
	struct list_head *entry, *tmp;
	struct nouveau_gpuobj_ref *ref;
	int i;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	if (!chan->ramht_refs.next)
		return;

	list_for_each_safe(entry, tmp, &chan->ramht_refs) {
		ref = list_entry(entry, struct nouveau_gpuobj_ref, list);

		nouveau_gpuobj_ref_del(dev, &ref);
	}

	nouveau_gpuobj_ref_del(dev, &chan->ramht);

	nouveau_gpuobj_del(dev, &chan->vm_pd);
	nouveau_gpuobj_ref_del(dev, &chan->vm_gart_pt);
	for (i = 0; i < dev_priv->vm_vram_pt_nr; i++)
		nouveau_gpuobj_ref_del(dev, &chan->vm_vram_pt[i]);

	if (chan->ramin_heap.free_stack.next)
		drm_mm_takedown(&chan->ramin_heap);
	if (chan->ramin)
		nouveau_gpuobj_ref_del(dev, &chan->ramin);
}

int
nouveau_gpuobj_suspend(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj;
	int i;

	if (dev_priv->card_type < NV_50) {
		dev_priv->susres.ramin_copy = vmalloc(dev_priv->ramin_rsvd_vram);
		if (!dev_priv->susres.ramin_copy)
			return -ENOMEM;

		for (i = 0; i < dev_priv->ramin_rsvd_vram; i += 4)
			dev_priv->susres.ramin_copy[i/4] = nv_ri32(dev, i);
		return 0;
	}

	list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
		if (!gpuobj->im_backing || (gpuobj->flags & NVOBJ_FLAG_FAKE))
			continue;

		gpuobj->im_backing_suspend = vmalloc(gpuobj->im_pramin->size);
		if (!gpuobj->im_backing_suspend) {
			nouveau_gpuobj_resume(dev);
			return -ENOMEM;
		}

		for (i = 0; i < gpuobj->im_pramin->size; i += 4)
			gpuobj->im_backing_suspend[i/4] = nv_ro32(gpuobj, i);
	}

	return 0;
}

void
nouveau_gpuobj_suspend_cleanup(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj;

	if (dev_priv->card_type < NV_50) {
		vfree(dev_priv->susres.ramin_copy);
		dev_priv->susres.ramin_copy = NULL;
		return;
	}

	list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
		if (!gpuobj->im_backing_suspend)
			continue;

		vfree(gpuobj->im_backing_suspend);
		gpuobj->im_backing_suspend = NULL;
	}
}

void
nouveau_gpuobj_resume(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj;
	int i;

	if (dev_priv->card_type < NV_50) {
		for (i = 0; i < dev_priv->ramin_rsvd_vram; i += 4)
			nv_wi32(dev, i, dev_priv->susres.ramin_copy[i/4]);
		nouveau_gpuobj_suspend_cleanup(dev);
		return;
	}

	list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
		if (!gpuobj->im_backing_suspend)
			continue;

		for (i = 0; i < gpuobj->im_pramin->size; i += 4)
			nv_wo32(gpuobj, i, gpuobj->im_backing_suspend[i/4]);
		dev_priv->engine.instmem.flush(dev);
	}

	nouveau_gpuobj_suspend_cleanup(dev);
}

int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_nouveau_grobj_alloc *init = data;
	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
	struct nouveau_pgraph_object_class *grc;
	struct nouveau_gpuobj *gr = NULL;
	struct nouveau_channel *chan;
	int ret;

	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(init->channel, file_priv, chan);

	if (init->handle == ~0)
		return -EINVAL;

	grc = pgraph->grclass;
	while (grc->id) {
		if (grc->id == init->class)
			break;
		grc++;
	}

	if (!grc->id) {
		NV_ERROR(dev, "Illegal object class: 0x%x\n", init->class);
		return -EPERM;
	}

	if (nouveau_gpuobj_ref_find(chan, init->handle, NULL) == 0)
		return -EEXIST;

	if (!grc->software)
		ret = nouveau_gpuobj_gr_new(chan, grc->id, &gr);
	else
		ret = nouveau_gpuobj_sw_new(chan, grc->id, &gr);

	if (ret) {
		NV_ERROR(dev, "Error creating object: %d (%d/0x%08x)\n",
			 ret, init->channel, init->handle);
		return ret;
	}

	ret = nouveau_gpuobj_ref_add(dev, chan, init->handle, gr, NULL);
	if (ret) {
		NV_ERROR(dev, "Error referencing object: %d (%d/0x%08x)\n",
			 ret, init->channel, init->handle);
		nouveau_gpuobj_del(dev, &gr);
		return ret;
	}

	return 0;
}

int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_nouveau_gpuobj_free *objfree = data;
	struct nouveau_gpuobj_ref *ref;
	struct nouveau_channel *chan;
	int ret;

	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(objfree->channel, file_priv, chan);

	ret = nouveau_gpuobj_ref_find(chan, objfree->handle, &ref);
	if (ret)
		return ret;
	nouveau_gpuobj_ref_del(dev, &ref);

	return 0;
}

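/* Object accessors: read/write one 32-bit word of a gpuobj through the
 * global PRAMIN aperture, at the absolute instance address (pinst)
 * computed when the object was created.
 */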
u32
nv_ro32(struct nouveau_gpuobj *gpuobj, u32 offset)
{
	return nv_ri32(gpuobj->dev, gpuobj->pinst + offset);
}

void
nv_wo32(struct nouveau_gpuobj *gpuobj, u32 offset, u32 val)
{
	nv_wi32(gpuobj->dev, gpuobj->pinst + offset, val);
}