/*
 * Copyright (C) 2006 Ben Skeggs.
 *
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

/*
 * Authors:
 *   Ben Skeggs <darktama@iinet.net.au>
 */

#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nouveau_ramht.h"

struct nouveau_gpuobj_method {
	struct list_head head;
	u32 mthd;
	int (*exec)(struct nouveau_channel *, u32 class, u32 mthd, u32 data);
};

struct nouveau_gpuobj_class {
	struct list_head head;
	struct list_head methods;
	u32 id;
	u32 engine;
};

int
nouveau_gpuobj_class_new(struct drm_device *dev, u32 class, u32 engine)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj_class *oc;

	oc = kzalloc(sizeof(*oc), GFP_KERNEL);
	if (!oc)
		return -ENOMEM;

	INIT_LIST_HEAD(&oc->methods);
	oc->id = class;
	oc->engine = engine;
	list_add(&oc->head, &dev_priv->classes);
	return 0;
}

int
nouveau_gpuobj_mthd_new(struct drm_device *dev, u32 class, u32 mthd,
			int (*exec)(struct nouveau_channel *, u32, u32, u32))
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj_method *om;
	struct nouveau_gpuobj_class *oc;

	list_for_each_entry(oc, &dev_priv->classes, head) {
		if (oc->id == class)
			goto found;
	}

	return -EINVAL;

found:
	om = kzalloc(sizeof(*om), GFP_KERNEL);
	if (!om)
		return -ENOMEM;

	om->mthd = mthd;
	om->exec = exec;
	list_add(&om->head, &oc->methods);
	return 0;
}

int
nouveau_gpuobj_mthd_call(struct nouveau_channel *chan,
			 u32 class, u32 mthd, u32 data)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct nouveau_gpuobj_method *om;
	struct nouveau_gpuobj_class *oc;

	list_for_each_entry(oc, &dev_priv->classes, head) {
		if (oc->id != class)
			continue;

		list_for_each_entry(om, &oc->methods, head) {
			if (om->mthd == mthd)
				return om->exec(chan, class, mthd, data);
		}
	}

	return -ENOENT;
}

int
nouveau_gpuobj_mthd_call2(struct drm_device *dev, int chid,
			  u32 class, u32 mthd, u32 data)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan = NULL;
	unsigned long flags;
	int ret = -EINVAL;

	spin_lock_irqsave(&dev_priv->channels.lock, flags);
	if (chid > 0 && chid < dev_priv->engine.fifo.channels)
		chan = dev_priv->channels.ptr[chid];
	if (chan)
		ret = nouveau_gpuobj_mthd_call(chan, class, mthd, data);
	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
	return ret;
}
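
/* Illustrative use of the class/method tables above; the class id,
 * method offset and handler name below are made-up example values:
 *
 *	nouveau_gpuobj_class_new(dev, 0x506e, NVOBJ_ENGINE_SW);
 *	nouveau_gpuobj_mthd_new(dev, 0x506e, 0x0500, example_mthd_exec);
 *
 * nouveau_gpuobj_mthd_call() then dispatches a (class, mthd) pair on a
 * channel to the matching handler, or returns -ENOENT.
 */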

/* NVidia uses context objects to drive drawing operations.

   Context objects can be selected into 8 subchannels in the FIFO,
   and then used via DMA command buffers.

   A context object is referenced by a user-defined handle (CARD32). The HW
   looks up graphics objects in a hash table in the instance RAM.

   An entry in the hash table consists of 2 CARD32. The first CARD32 contains
   the handle, the second one a bitfield that contains the address of the
   object in instance RAM.

   The format of the second CARD32 seems to be:

   NV4 to NV30:

   15: 0  instance_addr >> 4
   17:16  engine (here uses 1 = graphics)
   28:24  channel id (here uses 0)
   31     valid (use 1)

   NV40:

   15: 0  instance_addr >> 4 (maybe 19-0)
   21:20  engine (here uses 1 = graphics)
   I'm unsure about the other bits, but using 0 seems to work.

   The key into the hash table is computed from the object handle and the
   channel id (see nouveau_ramht.c).
*/
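
/* A minimal sketch (not used by the driver) of how the second CARD32 of
 * an NV4-NV30 hash-table entry could be packed from the fields described
 * above; bit positions follow the comment, the helper name is made up.
 */
static inline u32
example_nv04_ramht_entry(u32 instance_addr, u32 engine, u32 chid)
{
	return (instance_addr >> 4) |	/* 15:0  instance address */
	       (engine << 16) |		/* 17:16 engine, 1 = graphics */
	       (chid << 24) |		/* 28:24 channel id */
	       0x80000000;		/* 31    valid */
}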

int
nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
		   uint32_t size, int align, uint32_t flags,
		   struct nouveau_gpuobj **gpuobj_ret)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
	struct nouveau_gpuobj *gpuobj;
	struct drm_mm_node *ramin = NULL;
	int ret, i;

	NV_DEBUG(dev, "ch%d size=%u align=%d flags=0x%08x\n",
		 chan ? chan->id : -1, size, align, flags);

	gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
	if (!gpuobj)
		return -ENOMEM;
	NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
	gpuobj->dev = dev;
	gpuobj->flags = flags;
	kref_init(&gpuobj->refcount);
	gpuobj->size = size;

	spin_lock(&dev_priv->ramin_lock);
	list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
	spin_unlock(&dev_priv->ramin_lock);

	if (chan) {
		ramin = drm_mm_search_free(&chan->ramin_heap, size, align, 0);
		if (ramin)
			ramin = drm_mm_get_block(ramin, size, align);
		if (!ramin) {
			nouveau_gpuobj_ref(NULL, &gpuobj);
			return -ENOMEM;
		}

		gpuobj->pinst = chan->ramin->pinst;
		if (gpuobj->pinst != ~0)
			gpuobj->pinst += ramin->start;

		gpuobj->cinst = ramin->start;
		gpuobj->vinst = ramin->start + chan->ramin->vinst;
		gpuobj->node  = ramin;
	} else {
		ret = instmem->get(gpuobj, size, align);
		if (ret) {
			nouveau_gpuobj_ref(NULL, &gpuobj);
			return ret;
		}

		ret = -ENOSYS;
		if (!(flags & NVOBJ_FLAG_DONT_MAP))
			ret = instmem->map(gpuobj);
		if (ret)
			gpuobj->pinst = ~0;

		gpuobj->cinst = NVOBJ_CINST_GLOBAL;
	}

	if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
		for (i = 0; i < gpuobj->size; i += 4)
			nv_wo32(gpuobj, i, 0);
		instmem->flush(dev);
	}

	*gpuobj_ret = gpuobj;
	return 0;
}
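
/* Typical allocation through the function above (illustrative): a 4KiB,
 * zero-initialised object in a channel's PRAMIN heap, released again by
 * dropping the last reference:
 *
 *	struct nouveau_gpuobj *obj = NULL;
 *	ret = nouveau_gpuobj_new(dev, chan, 0x1000, 16,
 *				 NVOBJ_FLAG_ZERO_ALLOC, &obj);
 *	...
 *	nouveau_gpuobj_ref(NULL, &obj);
 */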

int
nouveau_gpuobj_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	NV_DEBUG(dev, "\n");

	INIT_LIST_HEAD(&dev_priv->gpuobj_list);
	INIT_LIST_HEAD(&dev_priv->classes);
	spin_lock_init(&dev_priv->ramin_lock);
	dev_priv->ramin_base = ~0;

	return 0;
}

void
nouveau_gpuobj_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj_method *om, *tm;
	struct nouveau_gpuobj_class *oc, *tc;

	NV_DEBUG(dev, "\n");

	list_for_each_entry_safe(oc, tc, &dev_priv->classes, head) {
		list_for_each_entry_safe(om, tm, &oc->methods, head) {
			list_del(&om->head);
			kfree(om);
		}
		list_del(&oc->head);
		kfree(oc);
	}

	BUG_ON(!list_empty(&dev_priv->gpuobj_list));
}

static void
nouveau_gpuobj_del(struct kref *ref)
{
	struct nouveau_gpuobj *gpuobj =
		container_of(ref, struct nouveau_gpuobj, refcount);
	struct drm_device *dev = gpuobj->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
	int i;

	NV_DEBUG(dev, "gpuobj %p\n", gpuobj);

	if (gpuobj->node && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) {
		for (i = 0; i < gpuobj->size; i += 4)
			nv_wo32(gpuobj, i, 0);
		instmem->flush(dev);
	}

	if (gpuobj->dtor)
		gpuobj->dtor(dev, gpuobj);

	if (gpuobj->cinst == NVOBJ_CINST_GLOBAL) {
		if (gpuobj->node) {
			instmem->unmap(gpuobj);
			instmem->put(gpuobj);
		}
	} else {
		if (gpuobj->node) {
			spin_lock(&dev_priv->ramin_lock);
			drm_mm_put_block(gpuobj->node);
			spin_unlock(&dev_priv->ramin_lock);
		}
	}

	spin_lock(&dev_priv->ramin_lock);
	list_del(&gpuobj->list);
	spin_unlock(&dev_priv->ramin_lock);

	kfree(gpuobj);
}

void
nouveau_gpuobj_ref(struct nouveau_gpuobj *ref, struct nouveau_gpuobj **ptr)
{
	if (ref)
		kref_get(&ref->refcount);

	if (*ptr)
		kref_put(&(*ptr)->refcount, nouveau_gpuobj_del);

	*ptr = ref;
}
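
/* nouveau_gpuobj_ref() follows the usual reference-swap idiom: it takes
 * a reference on "ref" (if any), drops the reference previously held
 * through *ptr (if any), and stores "ref" in *ptr.  Passing NULL as
 * "ref" therefore releases an object, as in:
 *
 *	nouveau_gpuobj_ref(NULL, &chan->ramin);
 */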

int
nouveau_gpuobj_new_fake(struct drm_device *dev, u32 pinst, u64 vinst,
			u32 size, u32 flags, struct nouveau_gpuobj **pgpuobj)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = NULL;
	int i;

	NV_DEBUG(dev,
		 "pinst=0x%08x vinst=0x%010llx size=0x%08x flags=0x%08x\n",
		 pinst, vinst, size, flags);

	gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
	if (!gpuobj)
		return -ENOMEM;
	NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
	gpuobj->dev = dev;
	gpuobj->flags = flags;
	kref_init(&gpuobj->refcount);
	gpuobj->size  = size;
	gpuobj->pinst = pinst;
	gpuobj->cinst = NVOBJ_CINST_GLOBAL;
	gpuobj->vinst = vinst;

	if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
		for (i = 0; i < gpuobj->size; i += 4)
			nv_wo32(gpuobj, i, 0);
		dev_priv->engine.instmem.flush(dev);
	}

	spin_lock(&dev_priv->ramin_lock);
	list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
	spin_unlock(&dev_priv->ramin_lock);
	*pgpuobj = gpuobj;
	return 0;
}

static uint32_t
nouveau_gpuobj_class_instmem_size(struct drm_device *dev, int class)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	/*XXX: dodgy hack for now */
	if (dev_priv->card_type >= NV_50)
		return 24;
	if (dev_priv->card_type >= NV_40)
		return 32;
	return 16;
}

/*
   DMA objects are used to reference a piece of memory in the
   framebuffer, PCI or AGP address space. Each object is 16 bytes big
   and looks as follows:

   entry[0]
   11:0   class (seems like I can always use 0 here)
   12     page table present?
   13     page entry linear?
   15:14  access: 0 rw, 1 ro, 2 wo
   17:16  target: 0 NV memory, 1 NV memory tiled, 2 PCI, 3 AGP
   31:20  dma adjust (bits 0-11 of the address)
   entry[1]
   dma limit (size of transfer)
   entry[X]
   1      0 readonly, 1 readwrite
   31:12  dma frame address of the page (bits 12-31 of the address)
   entry[N]
   page table terminator; nvidia uses the same value as the first pte,
   rivatv uses 0xffffffff

   Non-linear page tables need a list of frame addresses afterwards,
   the rivatv project has some info on this.

   The methods below create a DMA object in instance RAM and return a
   handle to it that can be used to set up context objects.
*/
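
/* Worked example (illustrative, derived from the layout above and from
 * nouveau_gpuobj_dma_new() below): a read-write DMA object over one
 * 4KiB page of PCI memory at bus address 0x12345678 would come out as
 *
 *	entry[0] = class | 0x00003000	(page table present + linear)
 *		 | 0x00020000		(target: PCI)
 *		 | (0x678 << 20)	(dma adjust: low 12 address bits)
 *	entry[1] = 0x00000fff		(dma limit: size - 1)
 *	entry[2] = 0x12345002		(page frame address | readwrite)
 *
 * with entry[3] repeating entry[2] as the page table terminator.
 */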

void
nv50_gpuobj_dma_init(struct nouveau_gpuobj *obj, u32 offset, int class,
		     u64 base, u64 size, int target, int access,
		     u32 type, u32 comp)
{
	struct drm_nouveau_private *dev_priv = obj->dev->dev_private;
	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
	u32 flags0;

	flags0  = (comp << 29) | (type << 22) | class;
	flags0 |= 0x00100000;

	switch (access) {
	case NV_MEM_ACCESS_RO: flags0 |= 0x00040000; break;
	case NV_MEM_ACCESS_RW:
	case NV_MEM_ACCESS_WO: flags0 |= 0x00080000; break;
	default:
		break;
	}

	switch (target) {
	case NV_MEM_TARGET_VRAM:
		flags0 |= 0x00010000;
		break;
	case NV_MEM_TARGET_PCI:
		flags0 |= 0x00020000;
		break;
	case NV_MEM_TARGET_PCI_NOSNOOP:
		flags0 |= 0x00030000;
		break;
	case NV_MEM_TARGET_GART:
		base += dev_priv->vm_gart_base;
		/* fall through */
	default:
		flags0 &= ~0x00100000;
		break;
	}

	/* convert to base + limit */
	size = (base + size) - 1;

	nv_wo32(obj, offset + 0x00, flags0);
	nv_wo32(obj, offset + 0x04, lower_32_bits(size));
	nv_wo32(obj, offset + 0x08, lower_32_bits(base));
	nv_wo32(obj, offset + 0x0c, upper_32_bits(size) << 24 |
				    upper_32_bits(base));
	nv_wo32(obj, offset + 0x10, 0x00000000);
	nv_wo32(obj, offset + 0x14, 0x00000000);

	pinstmem->flush(obj->dev);
}

int
nv50_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base, u64 size,
		    int target, int access, u32 type, u32 comp,
		    struct nouveau_gpuobj **pobj)
{
	struct drm_device *dev = chan->dev;
	int ret;

	ret = nouveau_gpuobj_new(dev, chan, 24, 16, NVOBJ_FLAG_ZERO_FREE, pobj);
	if (ret)
		return ret;

	nv50_gpuobj_dma_init(*pobj, 0, class, base, size, target,
			     access, type, comp);
	return 0;
}

int
nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base,
		       u64 size, int access, int target,
		       struct nouveau_gpuobj **pobj)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct drm_device *dev = chan->dev;
	struct nouveau_gpuobj *obj;
	u32 page_addr, flags0, flags2;
	int ret;

	if (dev_priv->card_type >= NV_50) {
		u32 comp = (target == NV_MEM_TARGET_VM) ? NV_MEM_COMP_VM : 0;
		u32 type = (target == NV_MEM_TARGET_VM) ? NV_MEM_TYPE_VM : 0;

		return nv50_gpuobj_dma_new(chan, class, base, size,
					   target, access, type, comp, pobj);
	}

	if (target == NV_MEM_TARGET_GART) {
		if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
			target = NV_MEM_TARGET_PCI_NOSNOOP;
			base += dev_priv->gart_info.aper_base;
		} else
		if (base != 0) {
			ret = nouveau_sgdma_get_page(dev, base, &page_addr);
			if (ret)
				return ret;

			target = NV_MEM_TARGET_PCI;
			base = page_addr;
		} else {
			nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma, pobj);
			return 0;
		}
	}

	flags0  = class;
	flags0 |= 0x00003000; /* PT present, PT linear */
	flags2  = 0;

	switch (target) {
	case NV_MEM_TARGET_PCI:
		flags0 |= 0x00020000;
		break;
	case NV_MEM_TARGET_PCI_NOSNOOP:
		flags0 |= 0x00030000;
		break;
	default:
		break;
	}

	switch (access) {
	case NV_MEM_ACCESS_RO:
		flags0 |= 0x00004000;
		break;
	case NV_MEM_ACCESS_WO:
		flags0 |= 0x00008000;
		/* fall through */
	default:
		flags2 |= 0x00000002;
		break;
	}

	flags0 |= (base & 0x00000fff) << 20;
	flags2 |= (base & 0xfffff000);

	ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
	if (ret)
		return ret;

	nv_wo32(obj, 0x00, flags0);
	nv_wo32(obj, 0x04, size - 1);
	nv_wo32(obj, 0x08, flags2);
	nv_wo32(obj, 0x0c, flags2);

	obj->engine = NVOBJ_ENGINE_SW;
	obj->class  = class;
	*pobj = obj;
	return 0;
}

/* Context objects in the instance RAM have the following structure.
 * On NV40 they are 32 bytes long, on NV30 and smaller 16 bytes.

   NV4 - NV30:

   entry[0]
   11:0   class
   12     chroma key enable
   13     user clip enable
   14     swizzle enable
   17:15  patch config:
          scrcopy_and, rop_and, blend_and, scrcopy, srccopy_pre, blend_pre
   18     synchronize enable
   19     endian: 1 big, 0 little
   21:20  dither mode
   23     single step enable
   24     patch status: 0 invalid, 1 valid
   25     context_surface 0: 1 valid
   26     context surface 1: 1 valid
   27     context pattern: 1 valid
   28     context rop: 1 valid
   29,30  context beta, beta4
   entry[1]
   7:0    mono format
   15:8   color format
   31:16  notify instance address
   entry[2]
   15:0   dma 0 instance address
   31:16  dma 1 instance address
   entry[3]
   dma method traps

   NV40:
   No idea what the exact format is. Here's what can be deduced:

   entry[0]:
   11:0   class  (maybe uses more bits here?)
   17     user clip enable
   21:19  patch config
   25     patch status valid ?
   entry[1]:
   15:0   DMA notifier  (maybe 20:0)
   entry[2]:
   15:0   DMA 0 instance (maybe 20:0)
   24     big endian
   entry[3]:
   15:0   DMA 1 instance (maybe 20:0)
   entry[4]:
   entry[5]:
   set to 0?
*/
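
/* Worked example (illustrative): on NV40, a graphics object for class
 * 0x4097 would start out as entry[0] = 0x00004097, with bit 24 of
 * entry[2] set on big-endian hosts; nouveau_gpuobj_gr_new() below
 * leaves the remaining words zeroed.
 */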
static int
nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class,
		      struct nouveau_gpuobj **gpuobj_ret)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct nouveau_gpuobj *gpuobj;

	gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
	if (!gpuobj)
		return -ENOMEM;
	gpuobj->dev = chan->dev;
	gpuobj->engine = NVOBJ_ENGINE_SW;
	gpuobj->class = class;
	kref_init(&gpuobj->refcount);
	gpuobj->cinst = 0x40;

	spin_lock(&dev_priv->ramin_lock);
	list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
	spin_unlock(&dev_priv->ramin_lock);
	*gpuobj_ret = gpuobj;
	return 0;
}

int
nouveau_gpuobj_gr_new(struct nouveau_channel *chan, u32 handle, int class)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct drm_device *dev = chan->dev;
	struct nouveau_gpuobj_class *oc;
	struct nouveau_gpuobj *gpuobj;
	int ret;

	NV_DEBUG(dev, "ch%d class=0x%04x\n", chan->id, class);

	list_for_each_entry(oc, &dev_priv->classes, head) {
		if (oc->id == class)
			goto found;
	}

	NV_ERROR(dev, "illegal object class: 0x%x\n", class);
	return -EINVAL;

found:
	switch (oc->engine) {
	case NVOBJ_ENGINE_SW:
		ret = nouveau_gpuobj_sw_new(chan, class, &gpuobj);
		if (ret)
			return ret;
		goto insert;
	case NVOBJ_ENGINE_GR:
		if (dev_priv->card_type >= NV_50 && !chan->ramin_grctx) {
			struct nouveau_pgraph_engine *pgraph =
				&dev_priv->engine.graph;

			ret = pgraph->create_context(chan);
			if (ret)
				return ret;
		}
		break;
	case NVOBJ_ENGINE_CRYPT:
		if (!chan->crypt_ctx) {
			struct nouveau_crypt_engine *pcrypt =
				&dev_priv->engine.crypt;

			ret = pcrypt->create_context(chan);
			if (ret)
				return ret;
		}
		break;
	}

	ret = nouveau_gpuobj_new(dev, chan,
				 nouveau_gpuobj_class_instmem_size(dev, class),
				 16,
				 NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE,
				 &gpuobj);
	if (ret) {
		NV_ERROR(dev, "error creating gpuobj: %d\n", ret);
		return ret;
	}

	if (dev_priv->card_type >= NV_50) {
		nv_wo32(gpuobj,  0, class);
		nv_wo32(gpuobj, 20, 0x00010000);
	} else {
		switch (class) {
		case NV_CLASS_NULL:
			nv_wo32(gpuobj, 0, 0x00001030);
			nv_wo32(gpuobj, 4, 0xFFFFFFFF);
			break;
		default:
			if (dev_priv->card_type >= NV_40) {
				nv_wo32(gpuobj, 0, class);
#ifdef __BIG_ENDIAN
				nv_wo32(gpuobj, 8, 0x01000000);
#endif
			} else {
#ifdef __BIG_ENDIAN
				nv_wo32(gpuobj, 0, class | 0x00080000);
#else
				nv_wo32(gpuobj, 0, class);
#endif
			}
		}
	}
	dev_priv->engine.instmem.flush(dev);

	gpuobj->engine = oc->engine;
	gpuobj->class  = oc->id;

insert:
	ret = nouveau_ramht_insert(chan, handle, gpuobj);
	if (ret)
		NV_ERROR(dev, "error adding gpuobj to RAMHT: %d\n", ret);
	nouveau_gpuobj_ref(NULL, &gpuobj);
	return ret;
}

static int
nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t size;
	uint32_t base;
	int ret;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	/* Base amount for object storage (4KiB enough?) */
	size = 0x2000;
	base = 0;

	/* PGRAPH context */
	size += dev_priv->engine.graph.grctx_size;

	if (dev_priv->card_type == NV_50) {
		/* Various fixed table thingos */
		size += 0x1400; /* mostly unknown stuff */
		size += 0x4000; /* vm pd */
		base  = 0x6000;
		/* RAMHT, not sure about setting size yet, 32KiB to be safe */
		size += 0x8000;
		/* RAMFC */
		size += 0x1000;
	}

	ret = nouveau_gpuobj_new(dev, NULL, size, 0x1000, 0, &chan->ramin);
	if (ret) {
		NV_ERROR(dev, "Error allocating channel PRAMIN: %d\n", ret);
		return ret;
	}

	ret = drm_mm_init(&chan->ramin_heap, base, size);
	if (ret) {
		NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret);
		nouveau_gpuobj_ref(NULL, &chan->ramin);
		return ret;
	}

	return 0;
}

int
nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
			    uint32_t vram_h, uint32_t tt_h)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
	struct nouveau_gpuobj *vram = NULL, *tt = NULL;
	int ret, i;

	NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);

	/* Allocate a chunk of memory for per-channel object storage */
	ret = nouveau_gpuobj_channel_init_pramin(chan);
	if (ret) {
		NV_ERROR(dev, "init pramin\n");
		return ret;
	}

	/* NV50 VM
	 *  - Allocate per-channel page-directory
	 *  - Map GART and VRAM into the channel's address space at the
	 *    locations determined during init.
	 */
	if (dev_priv->card_type >= NV_50) {
		u32 pgd_offs = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200;
		u64 vm_vinst = chan->ramin->vinst + pgd_offs;
		u32 vm_pinst = chan->ramin->pinst;
		u32 pde;

		if (vm_pinst != ~0)
			vm_pinst += pgd_offs;

		ret = nouveau_gpuobj_new_fake(dev, vm_pinst, vm_vinst, 0x4000,
					      0, &chan->vm_pd);
		if (ret)
			return ret;
		for (i = 0; i < 0x4000; i += 8) {
			nv_wo32(chan->vm_pd, i + 0, 0x00000000);
			nv_wo32(chan->vm_pd, i + 4, 0xdeadcafe);
		}

		nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma,
				   &chan->vm_gart_pt);
		pde = (dev_priv->vm_gart_base / (512*1024*1024)) * 8;
		nv_wo32(chan->vm_pd, pde + 0, chan->vm_gart_pt->vinst | 3);
		nv_wo32(chan->vm_pd, pde + 4, 0x00000000);

		pde = (dev_priv->vm_vram_base / (512*1024*1024)) * 8;
		for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) {
			nouveau_gpuobj_ref(dev_priv->vm_vram_pt[i],
					   &chan->vm_vram_pt[i]);

			nv_wo32(chan->vm_pd, pde + 0,
				chan->vm_vram_pt[i]->vinst | 0x61);
			nv_wo32(chan->vm_pd, pde + 4, 0x00000000);
			pde += 8;
		}

		instmem->flush(dev);
	}

	/* RAMHT */
	if (dev_priv->card_type < NV_50) {
		nouveau_ramht_ref(dev_priv->ramht, &chan->ramht, NULL);
	} else {
		struct nouveau_gpuobj *ramht = NULL;

		ret = nouveau_gpuobj_new(dev, chan, 0x8000, 16,
					 NVOBJ_FLAG_ZERO_ALLOC, &ramht);
		if (ret)
			return ret;

		ret = nouveau_ramht_new(dev, ramht, &chan->ramht);
		nouveau_gpuobj_ref(NULL, &ramht);
		if (ret)
			return ret;
	}

	/* VRAM ctxdma */
	if (dev_priv->card_type >= NV_50) {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     0, dev_priv->vm_end,
					     NV_MEM_ACCESS_RW,
					     NV_MEM_TARGET_VM, &vram);
		if (ret) {
			NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
			return ret;
		}
	} else {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     0, dev_priv->fb_available_size,
					     NV_MEM_ACCESS_RW,
					     NV_MEM_TARGET_VRAM, &vram);
		if (ret) {
			NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
			return ret;
		}
	}

	ret = nouveau_ramht_insert(chan, vram_h, vram);
	nouveau_gpuobj_ref(NULL, &vram);
	if (ret) {
		NV_ERROR(dev, "Error adding VRAM ctxdma to RAMHT: %d\n", ret);
		return ret;
	}

	/* TT memory ctxdma */
	if (dev_priv->card_type >= NV_50) {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     0, dev_priv->vm_end,
					     NV_MEM_ACCESS_RW,
					     NV_MEM_TARGET_VM, &tt);
	} else {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     0, dev_priv->gart_info.aper_size,
					     NV_MEM_ACCESS_RW,
					     NV_MEM_TARGET_GART, &tt);
	}

	if (ret) {
		NV_ERROR(dev, "Error creating TT ctxdma: %d\n", ret);
		return ret;
	}

	ret = nouveau_ramht_insert(chan, tt_h, tt);
	nouveau_gpuobj_ref(NULL, &tt);
	if (ret) {
		NV_ERROR(dev, "Error adding TT ctxdma to RAMHT: %d\n", ret);
		return ret;
	}

	return 0;
}

void
nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct drm_device *dev = chan->dev;
	int i;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	if (!chan->ramht)
		return;

	nouveau_ramht_ref(NULL, &chan->ramht, chan);

	nouveau_gpuobj_ref(NULL, &chan->vm_pd);
	nouveau_gpuobj_ref(NULL, &chan->vm_gart_pt);
	for (i = 0; i < dev_priv->vm_vram_pt_nr; i++)
		nouveau_gpuobj_ref(NULL, &chan->vm_vram_pt[i]);

	if (chan->ramin_heap.free_stack.next)
		drm_mm_takedown(&chan->ramin_heap);
	nouveau_gpuobj_ref(NULL, &chan->ramin);
}

int
nouveau_gpuobj_suspend(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj;
	int i;

	list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
		if (gpuobj->cinst != NVOBJ_CINST_GLOBAL)
			continue;

		gpuobj->suspend = vmalloc(gpuobj->size);
		if (!gpuobj->suspend) {
			nouveau_gpuobj_resume(dev);
			return -ENOMEM;
		}

		for (i = 0; i < gpuobj->size; i += 4)
			gpuobj->suspend[i/4] = nv_ro32(gpuobj, i);
	}

	return 0;
}

void
nouveau_gpuobj_resume(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj;
	int i;

	list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
		if (!gpuobj->suspend)
			continue;

		for (i = 0; i < gpuobj->size; i += 4)
			nv_wo32(gpuobj, i, gpuobj->suspend[i/4]);

		vfree(gpuobj->suspend);
		gpuobj->suspend = NULL;
	}

	dev_priv->engine.instmem.flush(dev);
}

int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_nouveau_grobj_alloc *init = data;
	struct nouveau_channel *chan;
	int ret;

	if (init->handle == ~0)
		return -EINVAL;

	chan = nouveau_channel_get(dev, file_priv, init->channel);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	if (nouveau_ramht_find(chan, init->handle)) {
		ret = -EEXIST;
		goto out;
	}

	ret = nouveau_gpuobj_gr_new(chan, init->handle, init->class);
	if (ret) {
		NV_ERROR(dev, "Error creating object: %d (%d/0x%08x)\n",
			 ret, init->channel, init->handle);
	}

out:
	nouveau_channel_put(&chan);
	return ret;
}

int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_nouveau_gpuobj_free *objfree = data;
	struct nouveau_channel *chan;
	int ret;

	chan = nouveau_channel_get(dev, file_priv, objfree->channel);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	/* Synchronize with the user channel */
	nouveau_channel_idle(chan);

	ret = nouveau_ramht_remove(chan, objfree->handle);
	nouveau_channel_put(&chan);
	return ret;
}

/* Read a 32-bit word from a gpuobj.  If the object can't be reached
 * through PRAMIN directly (pinst == ~0, or PRAMIN isn't available yet),
 * fall back to the indirect 64KiB window at 0x700000, sliding it over
 * the object's vinst address via register 0x001700.
 */
u32
nv_ro32(struct nouveau_gpuobj *gpuobj, u32 offset)
{
	struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
	struct drm_device *dev = gpuobj->dev;

	if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) {
		u64 ptr = gpuobj->vinst + offset;
		u32 base = ptr >> 16;
		u32 val;

		spin_lock(&dev_priv->ramin_lock);
		if (dev_priv->ramin_base != base) {
			dev_priv->ramin_base = base;
			nv_wr32(dev, 0x001700, dev_priv->ramin_base);
		}
		val = nv_rd32(dev, 0x700000 + (ptr & 0xffff));
		spin_unlock(&dev_priv->ramin_lock);
		return val;
	}

	return nv_ri32(dev, gpuobj->pinst + offset);
}

/* Write counterpart of nv_ro32(), using the same indirect window
 * fallback when the object has no direct PRAMIN mapping.
 */
void
nv_wo32(struct nouveau_gpuobj *gpuobj, u32 offset, u32 val)
{
	struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
	struct drm_device *dev = gpuobj->dev;

	if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) {
		u64 ptr = gpuobj->vinst + offset;
		u32 base = ptr >> 16;

		spin_lock(&dev_priv->ramin_lock);
		if (dev_priv->ramin_base != base) {
			dev_priv->ramin_base = base;
			nv_wr32(dev, 0x001700, dev_priv->ramin_base);
		}
		nv_wr32(dev, 0x700000 + (ptr & 0xffff), val);
		spin_unlock(&dev_priv->ramin_lock);
		return;
	}

	nv_wi32(dev, gpuobj->pinst + offset, val);
}