/*
 * Copyright (C) 2006 Ben Skeggs.
 *
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

/*
 * Authors:
 *   Ben Skeggs <darktama@iinet.net.au>
 */

#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_drm.h"

/* NVidia uses context objects to drive drawing operations.

   Context objects can be selected into 8 subchannels in the FIFO,
   and then used via DMA command buffers.

   A context object is referenced by a user defined handle (CARD32). The HW
   looks up graphics objects in a hash table in the instance RAM.

   An entry in the hash table consists of two CARD32s. The first CARD32
   contains the handle, the second a bitfield that encodes the address of
   the object in instance RAM.

   The format of the second CARD32 seems to be:

   NV4 to NV30:

   15: 0	instance_addr >> 4
   17:16	engine (here uses 1 = graphics)
   28:24	channel id (here uses 0)
   31		valid (use 1)

   NV40:

   15: 0	instance_addr >> 4   (maybe 19-0)
   21:20	engine (here uses 1 = graphics)
   I'm unsure about the other bits, but using 0 seems to work.

   The key into the hash table depends on the object handle and channel id and
   is given as:
*/
static uint32_t
nouveau_ramht_hash_handle(struct drm_device *dev, int channel, uint32_t handle)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t hash = 0;
	int i;

	NV_DEBUG(dev, "ch%d handle=0x%08x\n", channel, handle);

	for (i = 32; i > 0; i -= dev_priv->ramht_bits) {
		hash ^= (handle & ((1 << dev_priv->ramht_bits) - 1));
		handle >>= dev_priv->ramht_bits;
	}

	if (dev_priv->card_type < NV_50)
		hash ^= channel << (dev_priv->ramht_bits - 4);
	hash <<= 3;

	NV_DEBUG(dev, "hash=0x%08x\n", hash);
	return hash;
}
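
/*
 * Worked example (illustrative values, not from the original source):
 * with ramht_bits == 9, i.e. the 4KiB RAMHT of 512 8-byte entries used
 * on pre-NV50 chips, handle 0x00000042 on channel 1 folds as follows:
 *
 *	pass 1: hash = 0x000 ^ (0x042 & 0x1ff) = 0x042, handle becomes 0
 *	passes 2-4: handle is now 0, hash stays 0x042
 *
 * then hash ^= 1 << (9 - 4) gives 0x062, and hash <<= 3 converts the
 * entry index into byte offset 0x310 within RAMHT.
 */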

static int
nouveau_ramht_entry_valid(struct drm_device *dev, struct nouveau_gpuobj *ramht,
			  uint32_t offset)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t ctx = nv_ro32(dev, ramht, (offset + 4)/4);

	if (dev_priv->card_type < NV_40)
		return ((ctx & NV_RAMHT_CONTEXT_VALID) != 0);
	return (ctx != 0);
}

static int
nouveau_ramht_insert(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
	struct nouveau_channel *chan = ref->channel;
	struct nouveau_gpuobj *ramht = chan->ramht ? chan->ramht->gpuobj : NULL;
	uint32_t ctx, co, ho;

	if (!ramht) {
		NV_ERROR(dev, "No hash table!\n");
		return -EINVAL;
	}

	if (dev_priv->card_type < NV_40) {
		ctx = NV_RAMHT_CONTEXT_VALID | (ref->instance >> 4) |
		      (chan->id << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) |
		      (ref->gpuobj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT);
	} else
	if (dev_priv->card_type < NV_50) {
		ctx = (ref->instance >> 4) |
		      (chan->id << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) |
		      (ref->gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT);
	} else {
		if (ref->gpuobj->engine == NVOBJ_ENGINE_DISPLAY) {
			ctx = (ref->instance << 10) | 2;
		} else {
			ctx = (ref->instance >> 4) |
			      ((ref->gpuobj->engine <<
				NV40_RAMHT_CONTEXT_ENGINE_SHIFT));
		}
	}

	instmem->prepare_access(dev, true);
	co = ho = nouveau_ramht_hash_handle(dev, chan->id, ref->handle);
	do {
		if (!nouveau_ramht_entry_valid(dev, ramht, co)) {
			NV_DEBUG(dev,
				 "insert ch%d 0x%08x: h=0x%08x, c=0x%08x\n",
				 chan->id, co, ref->handle, ctx);
			nv_wo32(dev, ramht, (co + 0)/4, ref->handle);
			nv_wo32(dev, ramht, (co + 4)/4, ctx);

			list_add_tail(&ref->list, &chan->ramht_refs);
			instmem->finish_access(dev);
			return 0;
		}
		NV_DEBUG(dev, "collision ch%d 0x%08x: h=0x%08x\n",
			 chan->id, co, nv_ro32(dev, ramht, co/4));

		co += 8;
		if (co >= dev_priv->ramht_size)
			co = 0;
	} while (co != ho);
	instmem->finish_access(dev);

	NV_ERROR(dev, "RAMHT space exhausted. ch=%d\n", chan->id);
	return -ENOMEM;
}

static void
nouveau_ramht_remove(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
	struct nouveau_channel *chan = ref->channel;
	struct nouveau_gpuobj *ramht = chan->ramht ? chan->ramht->gpuobj : NULL;
	uint32_t co, ho;

	if (!ramht) {
		NV_ERROR(dev, "No hash table!\n");
		return;
	}

	instmem->prepare_access(dev, true);
	co = ho = nouveau_ramht_hash_handle(dev, chan->id, ref->handle);
	do {
		if (nouveau_ramht_entry_valid(dev, ramht, co) &&
		    (ref->handle == nv_ro32(dev, ramht, (co/4)))) {
			NV_DEBUG(dev,
				 "remove ch%d 0x%08x: h=0x%08x, c=0x%08x\n",
				 chan->id, co, ref->handle,
				 nv_ro32(dev, ramht, (co + 4)/4));
			nv_wo32(dev, ramht, (co + 0)/4, 0x00000000);
			nv_wo32(dev, ramht, (co + 4)/4, 0x00000000);

			list_del(&ref->list);
			instmem->finish_access(dev);
			return;
		}

		co += 8;
		if (co >= dev_priv->ramht_size)
			co = 0;
	} while (co != ho);
	list_del(&ref->list);
	instmem->finish_access(dev);

	NV_ERROR(dev, "RAMHT entry not found. ch=%d, handle=0x%08x\n",
		 chan->id, ref->handle);
}

int
nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
		   uint32_t size, int align, uint32_t flags,
		   struct nouveau_gpuobj **gpuobj_ret)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine = &dev_priv->engine;
	struct nouveau_gpuobj *gpuobj;
	struct drm_mm *pramin = NULL;
	int ret;

	NV_DEBUG(dev, "ch%d size=%u align=%d flags=0x%08x\n",
		 chan ? chan->id : -1, size, align, flags);

	if (!dev_priv || !gpuobj_ret || *gpuobj_ret != NULL)
		return -EINVAL;

	gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
	if (!gpuobj)
		return -ENOMEM;
	NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
	gpuobj->flags = flags;
	gpuobj->im_channel = chan;

	list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);

	/* Choose between global instmem heap, and per-channel private
	 * instmem heap.  On <NV50 allow requests for private instmem
	 * to be satisfied from global heap if no per-channel area
	 * available.
	 */
	if (chan) {
		if (chan->ramin_heap.ml_entry.next) {
			NV_DEBUG(dev, "private heap\n");
			pramin = &chan->ramin_heap;
		} else
		if (dev_priv->card_type < NV_50) {
			NV_DEBUG(dev, "global heap fallback\n");
			pramin = &dev_priv->ramin_heap;
		}
	} else {
		NV_DEBUG(dev, "global heap\n");
		pramin = &dev_priv->ramin_heap;
	}

	if (!pramin) {
		NV_ERROR(dev, "No PRAMIN heap!\n");
		nouveau_gpuobj_del(dev, &gpuobj);
		return -EINVAL;
	}

	if (!chan) {
		ret = engine->instmem.populate(dev, gpuobj, &size);
		if (ret) {
			nouveau_gpuobj_del(dev, &gpuobj);
			return ret;
		}
	}

	/* Allocate a chunk of the PRAMIN aperture */
	gpuobj->im_pramin = drm_mm_search_free(pramin, size, align, 0);
	if (gpuobj->im_pramin)
		gpuobj->im_pramin = drm_mm_get_block(gpuobj->im_pramin, size, align);

	if (!gpuobj->im_pramin) {
		nouveau_gpuobj_del(dev, &gpuobj);
		return -ENOMEM;
	}

	if (!chan) {
		ret = engine->instmem.bind(dev, gpuobj);
		if (ret) {
			nouveau_gpuobj_del(dev, &gpuobj);
			return ret;
		}
	}

	if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
		int i;

		engine->instmem.prepare_access(dev, true);
		for (i = 0; i < gpuobj->im_pramin->size; i += 4)
			nv_wo32(dev, gpuobj, i/4, 0);
		engine->instmem.finish_access(dev);
	}

	*gpuobj_ret = gpuobj;
	return 0;
}
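
/*
 * Usage sketch (sizes and flags here are illustrative): allocate a 4KiB
 * object, aligned to 256 bytes, from the channel's PRAMIN heap, zeroed
 * both on allocation and on free.
 *
 *	struct nouveau_gpuobj *obj = NULL;
 *	ret = nouveau_gpuobj_new(dev, chan, 0x1000, 256,
 *				 NVOBJ_FLAG_ZERO_ALLOC |
 *				 NVOBJ_FLAG_ZERO_FREE, &obj);
 */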

int
nouveau_gpuobj_early_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	NV_DEBUG(dev, "\n");

	INIT_LIST_HEAD(&dev_priv->gpuobj_list);

	return 0;
}

int
nouveau_gpuobj_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int ret;

	NV_DEBUG(dev, "\n");

	if (dev_priv->card_type < NV_50) {
		ret = nouveau_gpuobj_new_fake(dev,
			dev_priv->ramht_offset, ~0, dev_priv->ramht_size,
			NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ALLOW_NO_REFS,
			&dev_priv->ramht, NULL);
		if (ret)
			return ret;
	}

	return 0;
}

void
nouveau_gpuobj_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	NV_DEBUG(dev, "\n");

	nouveau_gpuobj_del(dev, &dev_priv->ramht);
}

void
nouveau_gpuobj_late_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = NULL;
	struct list_head *entry, *tmp;

	NV_DEBUG(dev, "\n");

	list_for_each_safe(entry, tmp, &dev_priv->gpuobj_list) {
		gpuobj = list_entry(entry, struct nouveau_gpuobj, list);

		NV_ERROR(dev, "gpuobj %p still exists at takedown, refs=%d\n",
			 gpuobj, gpuobj->refcount);
		gpuobj->refcount = 0;
		nouveau_gpuobj_del(dev, &gpuobj);
	}
}

int
nouveau_gpuobj_del(struct drm_device *dev, struct nouveau_gpuobj **pgpuobj)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine = &dev_priv->engine;
	struct nouveau_gpuobj *gpuobj;
	int i;

	NV_DEBUG(dev, "gpuobj %p\n", pgpuobj ? *pgpuobj : NULL);

	if (!dev_priv || !pgpuobj || !(*pgpuobj))
		return -EINVAL;
	gpuobj = *pgpuobj;

	if (gpuobj->refcount != 0) {
		NV_ERROR(dev, "gpuobj refcount is %d\n", gpuobj->refcount);
		return -EINVAL;
	}

	if (gpuobj->im_pramin && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) {
		engine->instmem.prepare_access(dev, true);
		for (i = 0; i < gpuobj->im_pramin->size; i += 4)
			nv_wo32(dev, gpuobj, i/4, 0);
		engine->instmem.finish_access(dev);
	}

	if (gpuobj->dtor)
		gpuobj->dtor(dev, gpuobj);

	if (gpuobj->im_backing && !(gpuobj->flags & NVOBJ_FLAG_FAKE))
		engine->instmem.clear(dev, gpuobj);

	if (gpuobj->im_pramin) {
		if (gpuobj->flags & NVOBJ_FLAG_FAKE)
			kfree(gpuobj->im_pramin);
		else
			drm_mm_put_block(gpuobj->im_pramin);
	}

	list_del(&gpuobj->list);

	*pgpuobj = NULL;
	kfree(gpuobj);
	return 0;
}

static int
nouveau_gpuobj_instance_get(struct drm_device *dev,
			    struct nouveau_channel *chan,
			    struct nouveau_gpuobj *gpuobj, uint32_t *inst)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *cpramin;

	/* <NV50 use PRAMIN address everywhere */
	if (dev_priv->card_type < NV_50) {
		*inst = gpuobj->im_pramin->start;
		return 0;
	}

	if (chan && gpuobj->im_channel != chan) {
		NV_ERROR(dev, "Channel mismatch: obj %d, ref %d\n",
			 gpuobj->im_channel->id, chan->id);
		return -EINVAL;
	}

	/* NV50 channel-local instance */
	if (chan) {
		cpramin = chan->ramin->gpuobj;
		*inst = gpuobj->im_pramin->start - cpramin->im_pramin->start;
		return 0;
	}

	/* NV50 global (VRAM) instance */
	if (!gpuobj->im_channel) {
		/* ...from global heap */
		if (!gpuobj->im_backing) {
			NV_ERROR(dev, "AII, no VRAM backing gpuobj\n");
			return -EINVAL;
		}
		*inst = gpuobj->im_backing_start;
		return 0;
	} else {
		/* ...from local heap */
		cpramin = gpuobj->im_channel->ramin->gpuobj;
		*inst = cpramin->im_backing_start +
			(gpuobj->im_pramin->start - cpramin->im_pramin->start);
		return 0;
	}

	return -EINVAL;
}

int
nouveau_gpuobj_ref_add(struct drm_device *dev, struct nouveau_channel *chan,
		       uint32_t handle, struct nouveau_gpuobj *gpuobj,
		       struct nouveau_gpuobj_ref **ref_ret)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj_ref *ref;
	uint32_t instance;
	int ret;

	NV_DEBUG(dev, "ch%d h=0x%08x gpuobj=%p\n",
		 chan ? chan->id : -1, handle, gpuobj);

	if (!dev_priv || !gpuobj || (ref_ret && *ref_ret != NULL))
		return -EINVAL;

	if (!chan && !ref_ret)
		return -EINVAL;

	if (gpuobj->engine == NVOBJ_ENGINE_SW && !gpuobj->im_pramin) {
		/* sw object */
		instance = 0x40;
	} else {
		ret = nouveau_gpuobj_instance_get(dev, chan, gpuobj, &instance);
		if (ret)
			return ret;
	}

	ref = kzalloc(sizeof(*ref), GFP_KERNEL);
	if (!ref)
		return -ENOMEM;
	INIT_LIST_HEAD(&ref->list);
	ref->gpuobj = gpuobj;
	ref->channel = chan;
	ref->instance = instance;

	if (!ref_ret) {
		ref->handle = handle;

		ret = nouveau_ramht_insert(dev, ref);
		if (ret) {
			kfree(ref);
			return ret;
		}
	} else {
		ref->handle = ~0;
		*ref_ret = ref;
	}

	ref->gpuobj->refcount++;
	return 0;
}

int nouveau_gpuobj_ref_del(struct drm_device *dev, struct nouveau_gpuobj_ref **pref)
{
	struct nouveau_gpuobj_ref *ref;

	NV_DEBUG(dev, "ref %p\n", pref ? *pref : NULL);

	if (!dev || !pref || *pref == NULL)
		return -EINVAL;
	ref = *pref;

	if (ref->handle != ~0)
		nouveau_ramht_remove(dev, ref);

	if (ref->gpuobj) {
		ref->gpuobj->refcount--;

		if (ref->gpuobj->refcount == 0) {
			if (!(ref->gpuobj->flags & NVOBJ_FLAG_ALLOW_NO_REFS))
				nouveau_gpuobj_del(dev, &ref->gpuobj);
		}
	}

	*pref = NULL;
	kfree(ref);
	return 0;
}

int
nouveau_gpuobj_new_ref(struct drm_device *dev,
		       struct nouveau_channel *oc, struct nouveau_channel *rc,
		       uint32_t handle, uint32_t size, int align,
		       uint32_t flags, struct nouveau_gpuobj_ref **ref)
{
	struct nouveau_gpuobj *gpuobj = NULL;
	int ret;

	ret = nouveau_gpuobj_new(dev, oc, size, align, flags, &gpuobj);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_ref_add(dev, rc, handle, gpuobj, ref);
	if (ret) {
		nouveau_gpuobj_del(dev, &gpuobj);
		return ret;
	}

	return 0;
}

int
nouveau_gpuobj_ref_find(struct nouveau_channel *chan, uint32_t handle,
			struct nouveau_gpuobj_ref **ref_ret)
{
	struct nouveau_gpuobj_ref *ref;
	struct list_head *entry, *tmp;

	list_for_each_safe(entry, tmp, &chan->ramht_refs) {
		ref = list_entry(entry, struct nouveau_gpuobj_ref, list);

		if (ref->handle == handle) {
			if (ref_ret)
				*ref_ret = ref;
			return 0;
		}
	}

	return -EINVAL;
}

int
nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t p_offset,
			uint32_t b_offset, uint32_t size,
			uint32_t flags, struct nouveau_gpuobj **pgpuobj,
			struct nouveau_gpuobj_ref **pref)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = NULL;
	int i;

	NV_DEBUG(dev,
		 "p_offset=0x%08x b_offset=0x%08x size=0x%08x flags=0x%08x\n",
		 p_offset, b_offset, size, flags);

	gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
	if (!gpuobj)
		return -ENOMEM;
	NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
	gpuobj->im_channel = NULL;
	gpuobj->flags = flags | NVOBJ_FLAG_FAKE;

	list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);

	if (p_offset != ~0) {
		gpuobj->im_pramin = kzalloc(sizeof(struct drm_mm_node),
					    GFP_KERNEL);
		if (!gpuobj->im_pramin) {
			nouveau_gpuobj_del(dev, &gpuobj);
			return -ENOMEM;
		}
		gpuobj->im_pramin->start = p_offset;
		gpuobj->im_pramin->size = size;
	}

	if (b_offset != ~0) {
		gpuobj->im_backing = (struct nouveau_bo *)-1;
		gpuobj->im_backing_start = b_offset;
	}

	if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
		dev_priv->engine.instmem.prepare_access(dev, true);
		for (i = 0; i < gpuobj->im_pramin->size; i += 4)
			nv_wo32(dev, gpuobj, i/4, 0);
		dev_priv->engine.instmem.finish_access(dev);
	}

	if (pref) {
		i = nouveau_gpuobj_ref_add(dev, NULL, 0, gpuobj, pref);
		if (i) {
			nouveau_gpuobj_del(dev, &gpuobj);
			return i;
		}
	}

	if (pgpuobj)
		*pgpuobj = gpuobj;
	return 0;
}

static uint32_t
nouveau_gpuobj_class_instmem_size(struct drm_device *dev, int class)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	/*XXX: dodgy hack for now */
	if (dev_priv->card_type >= NV_50)
		return 24;
	if (dev_priv->card_type >= NV_40)
		return 32;
	return 16;
}

/*
   DMA objects are used to reference a piece of memory in the
   framebuffer, PCI or AGP address space. Each object is 16 bytes big
   and looks as follows:

   entry[0]
	11:0  class (seems like I can always use 0 here)
	12    page table present?
	13    page entry linear?
	15:14 access: 0 rw, 1 ro, 2 wo
	17:16 target: 0 NV memory, 1 NV memory tiled, 2 PCI, 3 AGP
	31:20 dma adjust (bits 0-11 of the address)
   entry[1]
	dma limit (size of transfer)
   entry[X]
	1     0 readonly, 1 readwrite
	31:12 dma frame address of the page (bits 12-31 of the address)
   entry[N]
	page table terminator; nvidia uses the same value as the first
	pte, rivatv uses 0xffffffff

   Non-linear page tables need a list of frame addresses afterwards;
   the rivatv project has some info on this.

   The function below creates a DMA object in instance RAM and returns
   a handle to it that can be used to set up context objects.
*/
int
nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class,
		       uint64_t offset, uint64_t size, int access,
		       int target, struct nouveau_gpuobj **gpuobj)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
	int ret;

	NV_DEBUG(dev, "ch%d class=0x%04x offset=0x%llx size=0x%llx\n",
		 chan->id, class, offset, size);
	NV_DEBUG(dev, "access=%d target=%d\n", access, target);

	switch (target) {
	case NV_DMA_TARGET_AGP:
		offset += dev_priv->gart_info.aper_base;
		break;
	default:
		break;
	}

	ret = nouveau_gpuobj_new(dev, chan,
				 nouveau_gpuobj_class_instmem_size(dev, class),
				 16, NVOBJ_FLAG_ZERO_ALLOC |
				 NVOBJ_FLAG_ZERO_FREE, gpuobj);
	if (ret) {
		NV_ERROR(dev, "Error creating gpuobj: %d\n", ret);
		return ret;
	}

	instmem->prepare_access(dev, true);

	if (dev_priv->card_type < NV_50) {
		uint32_t frame, adjust, pte_flags = 0;

		if (access != NV_DMA_ACCESS_RO)
			pte_flags |= (1<<1);
		adjust = offset &  0x00000fff;
		frame  = offset & ~0x00000fff;

		nv_wo32(dev, *gpuobj, 0, ((1<<12) | (1<<13) |
					  (adjust << 20) |
					  (access << 14) |
					  (target << 16) |
					  class));
		nv_wo32(dev, *gpuobj, 1, size - 1);
		nv_wo32(dev, *gpuobj, 2, frame | pte_flags);
		nv_wo32(dev, *gpuobj, 3, frame | pte_flags);
	} else {
		uint64_t limit = offset + size - 1;
		uint32_t flags0, flags5;

		if (target == NV_DMA_TARGET_VIDMEM) {
			flags0 = 0x00190000;
			flags5 = 0x00010000;
		} else {
			flags0 = 0x7fc00000;
			flags5 = 0x00080000;
		}

		nv_wo32(dev, *gpuobj, 0, flags0 | class);
		nv_wo32(dev, *gpuobj, 1, lower_32_bits(limit));
		nv_wo32(dev, *gpuobj, 2, lower_32_bits(offset));
		nv_wo32(dev, *gpuobj, 3, ((upper_32_bits(limit) & 0xff) << 24) |
					  (upper_32_bits(offset) & 0xff));
		nv_wo32(dev, *gpuobj, 5, flags5);
	}

	instmem->finish_access(dev);

	(*gpuobj)->engine = NVOBJ_ENGINE_SW;
	(*gpuobj)->class  = class;
	return 0;
}
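
/*
 * Usage sketch, mirroring the <NV50 VRAM ctxdma setup that
 * nouveau_gpuobj_channel_init() performs below (vram_h is the caller's
 * chosen handle):
 *
 *	struct nouveau_gpuobj *vram = NULL;
 *	ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
 *				     dev_priv->fb_available_size,
 *				     NV_DMA_ACCESS_RW,
 *				     NV_DMA_TARGET_VIDMEM, &vram);
 *	if (ret == 0)
 *		ret = nouveau_gpuobj_ref_add(dev, chan, vram_h, vram, NULL);
 */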

int
nouveau_gpuobj_gart_dma_new(struct nouveau_channel *chan,
			    uint64_t offset, uint64_t size, int access,
			    struct nouveau_gpuobj **gpuobj,
			    uint32_t *o_ret)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int ret;

	if (dev_priv->gart_info.type == NOUVEAU_GART_AGP ||
	    (dev_priv->card_type >= NV_50 &&
	     dev_priv->gart_info.type == NOUVEAU_GART_SGDMA)) {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     offset + dev_priv->vm_gart_base,
					     size, access, NV_DMA_TARGET_AGP,
					     gpuobj);
		if (o_ret)
			*o_ret = 0;
	} else
	if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA) {
		*gpuobj = dev_priv->gart_info.sg_ctxdma;
		if (offset & ~0xffffffffULL) {
			NV_ERROR(dev, "obj offset exceeds 32-bits\n");
			return -EINVAL;
		}
		if (o_ret)
			*o_ret = (uint32_t)offset;
		ret = (*gpuobj != NULL) ? 0 : -EINVAL;
	} else {
		NV_ERROR(dev, "Invalid GART type %d\n", dev_priv->gart_info.type);
		return -EINVAL;
	}

	return ret;
}

/* Context objects in the instance RAM have the following structure.
 * On NV40 they are 32 bytes long, on NV30 and smaller 16 bytes.

   NV4 - NV30:

   entry[0]
	11:0 class
	12   chroma key enable
	13   user clip enable
	14   swizzle enable
	17:15 patch config:
		scrcopy_and, rop_and, blend_and, scrcopy, srccopy_pre, blend_pre
	18   synchronize enable
	19   endian: 1 big, 0 little
	21:20 dither mode
	23    single step enable
	24    patch status: 0 invalid, 1 valid
	25    context_surface 0: 1 valid
	26    context surface 1: 1 valid
	27    context pattern: 1 valid
	28    context rop: 1 valid
	29,30 context beta, beta4
   entry[1]
	7:0   mono format
	15:8  color format
	31:16 notify instance address
   entry[2]
	15:0  dma 0 instance address
	31:16 dma 1 instance address
   entry[3]
	dma method traps

   NV40:
   No idea what the exact format is. Here's what can be deduced:

   entry[0]:
	11:0  class  (maybe uses more bits here?)
	17    user clip enable
	21:19 patch config
	25    patch status valid ?
   entry[1]:
	15:0  DMA notifier  (maybe 20:0)
   entry[2]:
	15:0  DMA 0 instance (maybe 20:0)
	24    big endian
   entry[3]:
	15:0  DMA 1 instance (maybe 20:0)
   entry[4]:
   entry[5]:
	set to 0?
*/
int
nouveau_gpuobj_gr_new(struct nouveau_channel *chan, int class,
		      struct nouveau_gpuobj **gpuobj)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int ret;

	NV_DEBUG(dev, "ch%d class=0x%04x\n", chan->id, class);

	ret = nouveau_gpuobj_new(dev, chan,
				 nouveau_gpuobj_class_instmem_size(dev, class),
				 16,
				 NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE,
				 gpuobj);
	if (ret) {
		NV_ERROR(dev, "Error creating gpuobj: %d\n", ret);
		return ret;
	}

	dev_priv->engine.instmem.prepare_access(dev, true);
	if (dev_priv->card_type >= NV_50) {
		nv_wo32(dev, *gpuobj, 0, class);
		nv_wo32(dev, *gpuobj, 5, 0x00010000);
	} else {
		switch (class) {
		case NV_CLASS_NULL:
			nv_wo32(dev, *gpuobj, 0, 0x00001030);
			nv_wo32(dev, *gpuobj, 1, 0xFFFFFFFF);
			break;
		default:
			if (dev_priv->card_type >= NV_40) {
				nv_wo32(dev, *gpuobj, 0, class);
#ifdef __BIG_ENDIAN
				nv_wo32(dev, *gpuobj, 2, 0x01000000);
#endif
			} else {
#ifdef __BIG_ENDIAN
				nv_wo32(dev, *gpuobj, 0, class | 0x00080000);
#else
				nv_wo32(dev, *gpuobj, 0, class);
#endif
			}
		}
	}
	dev_priv->engine.instmem.finish_access(dev);

	(*gpuobj)->engine = NVOBJ_ENGINE_GR;
	(*gpuobj)->class  = class;
	return 0;
}
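
/*
 * Usage sketch, essentially what nouveau_ioctl_grobj_alloc() below
 * does; "class" must be an id known to pgraph->grclass and "handle" is
 * a user-chosen value (both are placeholders here):
 *
 *	struct nouveau_gpuobj *gr = NULL;
 *	ret = nouveau_gpuobj_gr_new(chan, class, &gr);
 *	if (ret == 0)
 *		ret = nouveau_gpuobj_ref_add(dev, chan, handle, gr, NULL);
 */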

int
nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class,
		      struct nouveau_gpuobj **gpuobj_ret)
{
	struct drm_nouveau_private *dev_priv;
	struct nouveau_gpuobj *gpuobj;

	if (!chan || !gpuobj_ret || *gpuobj_ret != NULL)
		return -EINVAL;
	dev_priv = chan->dev->dev_private;

	gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
	if (!gpuobj)
		return -ENOMEM;
	gpuobj->engine = NVOBJ_ENGINE_SW;
	gpuobj->class = class;

	list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
	*gpuobj_ret = gpuobj;
	return 0;
}

static int
nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *pramin = NULL;
	uint32_t size;
	uint32_t base;
	int ret;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	/* Base amount for object storage (4KiB enough?) */
	size = 0x1000;
	base = 0;

	/* PGRAPH context */

	if (dev_priv->card_type == NV_50) {
		/* Various fixed table thingos */
		size += 0x1400; /* mostly unknown stuff */
		size += 0x4000; /* vm pd */
		base  = 0x6000;
		/* RAMHT, not sure about setting size yet, 32KiB to be safe */
		size += 0x8000;
		/* RAMFC */
		size += 0x1000;
		/* PGRAPH context */
		size += 0x70000;
	}

	NV_DEBUG(dev, "ch%d PRAMIN size: 0x%08x bytes, base alloc=0x%08x\n",
		 chan->id, size, base);
	ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, size, 0x1000, 0,
				     &chan->ramin);
	if (ret) {
		NV_ERROR(dev, "Error allocating channel PRAMIN: %d\n", ret);
		return ret;
	}
	pramin = chan->ramin->gpuobj;

	ret = drm_mm_init(&chan->ramin_heap, pramin->im_pramin->start + base, size);
	if (ret) {
		NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret);
		nouveau_gpuobj_ref_del(dev, &chan->ramin);
		return ret;
	}

	return 0;
}

int
nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
			    uint32_t vram_h, uint32_t tt_h)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
	struct nouveau_gpuobj *vram = NULL, *tt = NULL;
	int ret, i;

	INIT_LIST_HEAD(&chan->ramht_refs);

	NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);

	/* Reserve a block of PRAMIN for the channel
	 *XXX: maybe on <NV50 too at some point
	 */
	if (0 || dev_priv->card_type == NV_50) {
		ret = nouveau_gpuobj_channel_init_pramin(chan);
		if (ret) {
			NV_ERROR(dev, "init pramin\n");
			return ret;
		}
	}

	/* NV50 VM
	 *  - Allocate per-channel page-directory
	 *  - Map GART and VRAM into the channel's address space at the
	 *    locations determined during init.
	 */
	if (dev_priv->card_type >= NV_50) {
		uint32_t vm_offset, pde;

		instmem->prepare_access(dev, true);

		vm_offset = (dev_priv->chipset & 0xf0) == 0x50 ? 0x1400 : 0x200;
		vm_offset += chan->ramin->gpuobj->im_pramin->start;

		ret = nouveau_gpuobj_new_fake(dev, vm_offset, ~0, 0x4000,
					      0, &chan->vm_pd, NULL);
		if (ret) {
			instmem->finish_access(dev);
			return ret;
		}
		for (i = 0; i < 0x4000; i += 8) {
			nv_wo32(dev, chan->vm_pd, (i+0)/4, 0x00000000);
			nv_wo32(dev, chan->vm_pd, (i+4)/4, 0xdeadcafe);
		}

		/* each 8-byte PDE maps 512MiB of the channel's address
		 * space; pde is a 32-bit word index into the directory
		 */
		pde = (dev_priv->vm_gart_base / (512*1024*1024)) * 2;
		ret = nouveau_gpuobj_ref_add(dev, NULL, 0,
					     dev_priv->gart_info.sg_ctxdma,
					     &chan->vm_gart_pt);
		if (ret) {
			instmem->finish_access(dev);
			return ret;
		}
		nv_wo32(dev, chan->vm_pd, pde++,
			    chan->vm_gart_pt->instance | 0x03);
		nv_wo32(dev, chan->vm_pd, pde++, 0x00000000);

		pde = (dev_priv->vm_vram_base / (512*1024*1024)) * 2;
		for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) {
			ret = nouveau_gpuobj_ref_add(dev, NULL, 0,
						     dev_priv->vm_vram_pt[i],
						     &chan->vm_vram_pt[i]);
			if (ret) {
				instmem->finish_access(dev);
				return ret;
			}

			nv_wo32(dev, chan->vm_pd, pde++,
				    chan->vm_vram_pt[i]->instance | 0x61);
			nv_wo32(dev, chan->vm_pd, pde++, 0x00000000);
		}

		instmem->finish_access(dev);
	}

	/* RAMHT */
	if (dev_priv->card_type < NV_50) {
		ret = nouveau_gpuobj_ref_add(dev, NULL, 0, dev_priv->ramht,
					     &chan->ramht);
		if (ret)
			return ret;
	} else {
		ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0,
					     0x8000, 16,
					     NVOBJ_FLAG_ZERO_ALLOC,
					     &chan->ramht);
		if (ret)
			return ret;
	}

	/* VRAM ctxdma */
	if (dev_priv->card_type >= NV_50) {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     0, dev_priv->vm_end,
					     NV_DMA_ACCESS_RW,
					     NV_DMA_TARGET_AGP, &vram);
		if (ret) {
			NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
			return ret;
		}
	} else {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     0, dev_priv->fb_available_size,
					     NV_DMA_ACCESS_RW,
					     NV_DMA_TARGET_VIDMEM, &vram);
		if (ret) {
			NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
			return ret;
		}
	}

	ret = nouveau_gpuobj_ref_add(dev, chan, vram_h, vram, NULL);
	if (ret) {
		NV_ERROR(dev, "Error referencing VRAM ctxdma: %d\n", ret);
		return ret;
	}

	/* TT memory ctxdma */
	if (dev_priv->card_type >= NV_50) {
		tt = vram;
	} else
	if (dev_priv->gart_info.type != NOUVEAU_GART_NONE) {
		ret = nouveau_gpuobj_gart_dma_new(chan, 0,
						  dev_priv->gart_info.aper_size,
						  NV_DMA_ACCESS_RW, &tt, NULL);
	} else {
		NV_ERROR(dev, "Invalid GART type %d\n", dev_priv->gart_info.type);
		ret = -EINVAL;
	}

	if (ret) {
		NV_ERROR(dev, "Error creating TT ctxdma: %d\n", ret);
		return ret;
	}

	ret = nouveau_gpuobj_ref_add(dev, chan, tt_h, tt, NULL);
	if (ret) {
		NV_ERROR(dev, "Error referencing TT ctxdma: %d\n", ret);
		return ret;
	}

	return 0;
}

void
nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct drm_device *dev = chan->dev;
	struct list_head *entry, *tmp;
	struct nouveau_gpuobj_ref *ref;
	int i;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	if (!chan->ramht_refs.next)
		return;

	list_for_each_safe(entry, tmp, &chan->ramht_refs) {
		ref = list_entry(entry, struct nouveau_gpuobj_ref, list);

		nouveau_gpuobj_ref_del(dev, &ref);
	}

	nouveau_gpuobj_ref_del(dev, &chan->ramht);

	nouveau_gpuobj_del(dev, &chan->vm_pd);
	nouveau_gpuobj_ref_del(dev, &chan->vm_gart_pt);
	for (i = 0; i < dev_priv->vm_vram_pt_nr; i++)
		nouveau_gpuobj_ref_del(dev, &chan->vm_vram_pt[i]);

	if (chan->ramin_heap.free_stack.next)
		drm_mm_takedown(&chan->ramin_heap);
	if (chan->ramin)
		nouveau_gpuobj_ref_del(dev, &chan->ramin);
}

int
nouveau_gpuobj_suspend(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj;
	int i;

	if (dev_priv->card_type < NV_50) {
		dev_priv->susres.ramin_copy = vmalloc(dev_priv->ramin_rsvd_vram);
		if (!dev_priv->susres.ramin_copy)
			return -ENOMEM;

		for (i = 0; i < dev_priv->ramin_rsvd_vram; i += 4)
			dev_priv->susres.ramin_copy[i/4] = nv_ri32(dev, i);
		return 0;
	}

	list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
		if (!gpuobj->im_backing || (gpuobj->flags & NVOBJ_FLAG_FAKE))
			continue;

		gpuobj->im_backing_suspend = vmalloc(gpuobj->im_pramin->size);
		if (!gpuobj->im_backing_suspend) {
			nouveau_gpuobj_resume(dev);
			return -ENOMEM;
		}

		dev_priv->engine.instmem.prepare_access(dev, false);
		for (i = 0; i < gpuobj->im_pramin->size / 4; i++)
			gpuobj->im_backing_suspend[i] = nv_ro32(dev, gpuobj, i);
		dev_priv->engine.instmem.finish_access(dev);
	}

	return 0;
}

void
nouveau_gpuobj_suspend_cleanup(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj;

	if (dev_priv->card_type < NV_50) {
		vfree(dev_priv->susres.ramin_copy);
		dev_priv->susres.ramin_copy = NULL;
		return;
	}

	list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
		if (!gpuobj->im_backing_suspend)
			continue;

		vfree(gpuobj->im_backing_suspend);
		gpuobj->im_backing_suspend = NULL;
	}
}

void
nouveau_gpuobj_resume(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj;
	int i;

	if (dev_priv->card_type < NV_50) {
		for (i = 0; i < dev_priv->ramin_rsvd_vram; i += 4)
			nv_wi32(dev, i, dev_priv->susres.ramin_copy[i/4]);
		nouveau_gpuobj_suspend_cleanup(dev);
		return;
	}

	list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
		if (!gpuobj->im_backing_suspend)
			continue;

		dev_priv->engine.instmem.prepare_access(dev, true);
		for (i = 0; i < gpuobj->im_pramin->size / 4; i++)
			nv_wo32(dev, gpuobj, i, gpuobj->im_backing_suspend[i]);
		dev_priv->engine.instmem.finish_access(dev);
	}

	nouveau_gpuobj_suspend_cleanup(dev);
}

int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_nouveau_grobj_alloc *init = data;
	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
	struct nouveau_pgraph_object_class *grc;
	struct nouveau_gpuobj *gr = NULL;
	struct nouveau_channel *chan;
	int ret;

	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(init->channel, file_priv, chan);

	if (init->handle == ~0)
		return -EINVAL;

	grc = pgraph->grclass;
	while (grc->id) {
		if (grc->id == init->class)
			break;
		grc++;
	}

	if (!grc->id) {
		NV_ERROR(dev, "Illegal object class: 0x%x\n", init->class);
		return -EPERM;
	}

	if (nouveau_gpuobj_ref_find(chan, init->handle, NULL) == 0)
		return -EEXIST;

	if (!grc->software)
		ret = nouveau_gpuobj_gr_new(chan, grc->id, &gr);
	else
		ret = nouveau_gpuobj_sw_new(chan, grc->id, &gr);

	if (ret) {
		NV_ERROR(dev, "Error creating object: %d (%d/0x%08x)\n",
			 ret, init->channel, init->handle);
		return ret;
	}

	ret = nouveau_gpuobj_ref_add(dev, chan, init->handle, gr, NULL);
	if (ret) {
		NV_ERROR(dev, "Error referencing object: %d (%d/0x%08x)\n",
			 ret, init->channel, init->handle);
		nouveau_gpuobj_del(dev, &gr);
		return ret;
	}

	return 0;
}

int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_nouveau_gpuobj_free *objfree = data;
	struct nouveau_gpuobj_ref *ref;
	struct nouveau_channel *chan;
	int ret;

	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(objfree->channel, file_priv, chan);

	ret = nouveau_gpuobj_ref_find(chan, objfree->handle, &ref);
	if (ret)
		return ret;
	nouveau_gpuobj_ref_del(dev, &ref);

	return 0;
}