/*
 * Copyright (C) 2006 Ben Skeggs.
 *
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

/*
 * Authors:
 *   Ben Skeggs <darktama@iinet.net.au>
 */

#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nouveau_ramht.h"

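/* Software class/method dispatch: each object class registered with
 * nouveau_gpuobj_class_new() is bound to an engine and carries a list of
 * per-method handlers added by nouveau_gpuobj_mthd_new(), which
 * nouveau_gpuobj_mthd_call() walks to dispatch a (class, method) pair.
 */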
struct nouveau_gpuobj_method {
        struct list_head head;
        u32 mthd;
        int (*exec)(struct nouveau_channel *, u32 class, u32 mthd, u32 data);
};

struct nouveau_gpuobj_class {
        struct list_head head;
        struct list_head methods;
        u32 id;
        u32 engine;
};

int
nouveau_gpuobj_class_new(struct drm_device *dev, u32 class, u32 engine)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj_class *oc;

        oc = kzalloc(sizeof(*oc), GFP_KERNEL);
        if (!oc)
                return -ENOMEM;

        INIT_LIST_HEAD(&oc->methods);
        oc->id = class;
        oc->engine = engine;
        list_add(&oc->head, &dev_priv->classes);
        return 0;
}

int
nouveau_gpuobj_mthd_new(struct drm_device *dev, u32 class, u32 mthd,
                        int (*exec)(struct nouveau_channel *, u32, u32, u32))
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj_method *om;
        struct nouveau_gpuobj_class *oc;

        list_for_each_entry(oc, &dev_priv->classes, head) {
                if (oc->id == class)
                        goto found;
        }

        return -EINVAL;

found:
        om = kzalloc(sizeof(*om), GFP_KERNEL);
        if (!om)
                return -ENOMEM;

        om->mthd = mthd;
        om->exec = exec;
        list_add(&om->head, &oc->methods);
        return 0;
}

int
nouveau_gpuobj_mthd_call(struct nouveau_channel *chan,
                         u32 class, u32 mthd, u32 data)
{
        struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
        struct nouveau_gpuobj_method *om;
        struct nouveau_gpuobj_class *oc;

        list_for_each_entry(oc, &dev_priv->classes, head) {
                if (oc->id != class)
                        continue;

                list_for_each_entry(om, &oc->methods, head) {
                        if (om->mthd == mthd)
                                return om->exec(chan, class, mthd, data);
                }
        }

        return -ENOENT;
}

int
nouveau_gpuobj_mthd_call2(struct drm_device *dev, int chid,
                          u32 class, u32 mthd, u32 data)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_channel *chan = NULL;
        unsigned long flags;
        int ret = -EINVAL;

        spin_lock_irqsave(&dev_priv->channels.lock, flags);
        if (chid > 0 && chid < dev_priv->engine.fifo.channels)
                chan = dev_priv->channels.ptr[chid];
        if (chan)
                ret = nouveau_gpuobj_mthd_call(chan, class, mthd, data);
        spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
        return ret;
}
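
/*
 * Registration/dispatch usage, as a minimal sketch (the class, method and
 * handler names here are hypothetical, chosen only to illustrate the API
 * above): an engine registers its classes and software methods at init
 * time, and an interrupt handler that only knows the channel id can later
 * dispatch through nouveau_gpuobj_mthd_call2():
 *
 *	nouveau_gpuobj_class_new(dev, 0x506e, NVOBJ_ENGINE_SW);
 *	nouveau_gpuobj_mthd_new(dev, 0x506e, 0x0500, mthd_flip);
 *	...
 *	nouveau_gpuobj_mthd_call2(dev, chid, 0x506e, 0x0500, data);
 *
 * where mthd_flip has the exec signature declared in
 * struct nouveau_gpuobj_method.
 */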

/* NVidia uses context objects to drive drawing operations.

   Context objects can be selected into 8 subchannels in the FIFO,
   and then used via DMA command buffers.

   A context object is referenced by a user defined handle (CARD32). The HW
   looks up graphics objects in a hash table in the instance RAM.

   An entry in the hash table consists of 2 CARD32s. The first CARD32 contains
   the handle, the second one a bitfield that contains the address of the
   object in instance RAM.

   The format of the second CARD32 seems to be:

   NV4 to NV30:

   15: 0 instance_addr >> 4
   17:16 engine (here uses 1 = graphics)
   28:24 channel id (here uses 0)
   31    valid (use 1)

   NV40:

   15: 0 instance_addr >> 4   (maybe 19-0)
   21:20 engine (here uses 1 = graphics)
   I'm unsure about the other bits, but using 0 seems to work.

   The key into the hash table depends on the object handle and channel id and
   is given as:
*/
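
/*
 * The hashing itself now lives in nouveau_ramht.c; roughly (a sketch of
 * that code, not an authoritative reference), the handle is folded down
 * by XOR in ramht->bits-sized chunks and mixed with the channel id on
 * pre-NV50 chips:
 *
 *	hash = 0;
 *	while (handle) {
 *		hash ^= handle & ((1 << ramht->bits) - 1);
 *		handle >>= ramht->bits;
 *	}
 *	if (dev_priv->card_type < NV_50)
 *		hash ^= chan->id << (ramht->bits - 4);
 *	hash <<= 3;	hash indexes 8-byte (2 CARD32) entries
 */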

int
nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
                   uint32_t size, int align, uint32_t flags,
                   struct nouveau_gpuobj **gpuobj_ret)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
        struct nouveau_gpuobj *gpuobj;
        struct drm_mm_node *ramin = NULL;
        int ret, i;

        NV_DEBUG(dev, "ch%d size=%u align=%d flags=0x%08x\n",
                 chan ? chan->id : -1, size, align, flags);

        gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
        if (!gpuobj)
                return -ENOMEM;
        NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
        gpuobj->dev = dev;
        gpuobj->flags = flags;
        kref_init(&gpuobj->refcount);
        gpuobj->size = size;

        spin_lock(&dev_priv->ramin_lock);
        list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
        spin_unlock(&dev_priv->ramin_lock);

        if (chan) {
                ramin = drm_mm_search_free(&chan->ramin_heap, size, align, 0);
                if (ramin)
                        ramin = drm_mm_get_block(ramin, size, align);
                if (!ramin) {
                        nouveau_gpuobj_ref(NULL, &gpuobj);
                        return -ENOMEM;
                }

                gpuobj->pinst = chan->ramin->pinst;
                if (gpuobj->pinst != ~0)
                        gpuobj->pinst += ramin->start;

                if (dev_priv->card_type < NV_50)
                        gpuobj->cinst = gpuobj->pinst;
                else
                        gpuobj->cinst = ramin->start;

                gpuobj->vinst = ramin->start + chan->ramin->vinst;
                gpuobj->node  = ramin;
        } else {
                ret = instmem->get(gpuobj, size, align);
                if (ret) {
                        nouveau_gpuobj_ref(NULL, &gpuobj);
                        return ret;
                }

                ret = -ENOSYS;
                if (dev_priv->ramin_available)
                        ret = instmem->map(gpuobj);
                if (ret)
                        gpuobj->pinst = ~0;

                gpuobj->cinst = NVOBJ_CINST_GLOBAL;
        }

        if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
                for (i = 0; i < gpuobj->size; i += 4)
                        nv_wo32(gpuobj, i, 0);
                instmem->flush(dev);
        }

        *gpuobj_ret = gpuobj;
        return 0;
}

int
nouveau_gpuobj_init(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;

        NV_DEBUG(dev, "\n");

        INIT_LIST_HEAD(&dev_priv->gpuobj_list);
        INIT_LIST_HEAD(&dev_priv->classes);
        spin_lock_init(&dev_priv->ramin_lock);
        dev_priv->ramin_base = ~0;

        return 0;
}

void
nouveau_gpuobj_takedown(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj_method *om, *tm;
        struct nouveau_gpuobj_class *oc, *tc;

        NV_DEBUG(dev, "\n");

        list_for_each_entry_safe(oc, tc, &dev_priv->classes, head) {
                list_for_each_entry_safe(om, tm, &oc->methods, head) {
                        list_del(&om->head);
                        kfree(om);
                }
                list_del(&oc->head);
                kfree(oc);
        }

        BUG_ON(!list_empty(&dev_priv->gpuobj_list));
}

static void
nouveau_gpuobj_del(struct kref *ref)
{
        struct nouveau_gpuobj *gpuobj =
                container_of(ref, struct nouveau_gpuobj, refcount);
        struct drm_device *dev = gpuobj->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
        int i;

        NV_DEBUG(dev, "gpuobj %p\n", gpuobj);

        if (gpuobj->node && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) {
                for (i = 0; i < gpuobj->size; i += 4)
                        nv_wo32(gpuobj, i, 0);
                instmem->flush(dev);
        }

        if (gpuobj->dtor)
                gpuobj->dtor(dev, gpuobj);

        if (gpuobj->cinst == NVOBJ_CINST_GLOBAL) {
                if (gpuobj->node) {
                        instmem->unmap(gpuobj);
                        instmem->put(gpuobj);
                }
        } else {
                if (gpuobj->node) {
                        spin_lock(&dev_priv->ramin_lock);
                        drm_mm_put_block(gpuobj->node);
                        spin_unlock(&dev_priv->ramin_lock);
                }
        }

        spin_lock(&dev_priv->ramin_lock);
        list_del(&gpuobj->list);
        spin_unlock(&dev_priv->ramin_lock);

        kfree(gpuobj);
}

void
nouveau_gpuobj_ref(struct nouveau_gpuobj *ref, struct nouveau_gpuobj **ptr)
{
        if (ref)
                kref_get(&ref->refcount);

        if (*ptr)
                kref_put(&(*ptr)->refcount, nouveau_gpuobj_del);

        *ptr = ref;
}

int
nouveau_gpuobj_new_fake(struct drm_device *dev, u32 pinst, u64 vinst,
                        u32 size, u32 flags, struct nouveau_gpuobj **pgpuobj)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *gpuobj = NULL;
        int i;

        NV_DEBUG(dev,
                 "pinst=0x%08x vinst=0x%010llx size=0x%08x flags=0x%08x\n",
                 pinst, vinst, size, flags);

        gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
        if (!gpuobj)
                return -ENOMEM;
        NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
        gpuobj->dev = dev;
        gpuobj->flags = flags;
        kref_init(&gpuobj->refcount);
        gpuobj->size  = size;
        gpuobj->pinst = pinst;
        gpuobj->cinst = NVOBJ_CINST_GLOBAL;
        gpuobj->vinst = vinst;

        if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
                for (i = 0; i < gpuobj->size; i += 4)
                        nv_wo32(gpuobj, i, 0);
                dev_priv->engine.instmem.flush(dev);
        }

        spin_lock(&dev_priv->ramin_lock);
        list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
        spin_unlock(&dev_priv->ramin_lock);
        *pgpuobj = gpuobj;
        return 0;
}

static uint32_t
nouveau_gpuobj_class_instmem_size(struct drm_device *dev, int class)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;

        /*XXX: dodgy hack for now */
        if (dev_priv->card_type >= NV_50)
                return 24;
        if (dev_priv->card_type >= NV_40)
                return 32;
        return 16;
}

/*
   DMA objects are used to reference a piece of memory in the
   framebuffer, PCI or AGP address space. Each object is 16 bytes big
   and looks as follows:

   entry[0]
	11:0  class (seems like I can always use 0 here)
	12    page table present?
	13    page entry linear?
	15:14 access: 0 rw, 1 ro, 2 wo
	17:16 target: 0 NV memory, 1 NV memory tiled, 2 PCI, 3 AGP
	31:20 dma adjust (bits 0-11 of the address)
   entry[1]
	dma limit (size of transfer)
   entry[X]
	1     0 readonly, 1 readwrite
	31:12 dma frame address of the page (bits 12-31 of the address)
   entry[N]
	page table terminator; nvidia uses the same value as the first pte,
	rivatv uses 0xffffffff

   Non linear page tables need a list of frame addresses afterwards,
   the rivatv project has some info on this.

   The method below creates a DMA object in instance RAM and returns a handle
   to it that can be used to set up context objects.
*/
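
/*
 * Worked example of the pre-NV50 encoding above (values are illustrative
 * only): a 4KiB read/write PCI object at bus address 0x12345678 comes out
 * of the code below as
 *
 *	entry[0] = (1 << 12) | (1 << 13)	page table present, linear
 *		 | (0x678 << 20)		dma adjust = offset & 0xfff
 *		 | (0 << 14) | (2 << 16)	access rw, target PCI
 *		 | class;
 *	entry[1] = 0x1000 - 1;			dma limit
 *	entry[2] = 0x12345000 | 2;		pte: frame address, readwrite
 *	entry[3] = 0x12345000 | 2;		terminator repeats the pte
 */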
int
nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class,
                       uint64_t offset, uint64_t size, int access,
                       int target, struct nouveau_gpuobj **gpuobj)
{
        struct drm_device *dev = chan->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
        int ret;

        NV_DEBUG(dev, "ch%d class=0x%04x offset=0x%llx size=0x%llx\n",
                 chan->id, class, offset, size);
        NV_DEBUG(dev, "access=%d target=%d\n", access, target);

        switch (target) {
        case NV_DMA_TARGET_AGP:
                offset += dev_priv->gart_info.aper_base;
                break;
        default:
                break;
        }

        ret = nouveau_gpuobj_new(dev, chan,
                                 nouveau_gpuobj_class_instmem_size(dev, class),
                                 16, NVOBJ_FLAG_ZERO_ALLOC |
                                 NVOBJ_FLAG_ZERO_FREE, gpuobj);
        if (ret) {
                NV_ERROR(dev, "Error creating gpuobj: %d\n", ret);
                return ret;
        }

        if (dev_priv->card_type < NV_50) {
                uint32_t frame, adjust, pte_flags = 0;

                if (access != NV_DMA_ACCESS_RO)
                        pte_flags |= (1<<1);
                adjust = offset &  0x00000fff;
                frame  = offset & ~0x00000fff;

                nv_wo32(*gpuobj,  0, ((1<<12) | (1<<13) | (adjust << 20) |
                                      (access << 14) | (target << 16) |
                                      class));
                nv_wo32(*gpuobj,  4, size - 1);
                nv_wo32(*gpuobj,  8, frame | pte_flags);
                nv_wo32(*gpuobj, 12, frame | pte_flags);
        } else {
                uint64_t limit = offset + size - 1;
                uint32_t flags0, flags5;

                if (target == NV_DMA_TARGET_VIDMEM) {
                        flags0 = 0x00190000;
                        flags5 = 0x00010000;
                } else {
                        flags0 = 0x7fc00000;
                        flags5 = 0x00080000;
                }

                nv_wo32(*gpuobj,  0, flags0 | class);
                nv_wo32(*gpuobj,  4, lower_32_bits(limit));
                nv_wo32(*gpuobj,  8, lower_32_bits(offset));
                nv_wo32(*gpuobj, 12, ((upper_32_bits(limit) & 0xff) << 24) |
                                     (upper_32_bits(offset) & 0xff));
                nv_wo32(*gpuobj, 20, flags5);
        }

        instmem->flush(dev);

        (*gpuobj)->engine = NVOBJ_ENGINE_SW;
        (*gpuobj)->class  = class;
        return 0;
}

int
nouveau_gpuobj_gart_dma_new(struct nouveau_channel *chan,
                            uint64_t offset, uint64_t size, int access,
                            struct nouveau_gpuobj **gpuobj,
                            uint32_t *o_ret)
{
        struct drm_device *dev = chan->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        int ret;

        if (dev_priv->gart_info.type == NOUVEAU_GART_AGP ||
            (dev_priv->card_type >= NV_50 &&
             dev_priv->gart_info.type == NOUVEAU_GART_SGDMA)) {
                ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
                                             offset + dev_priv->vm_gart_base,
                                             size, access, NV_DMA_TARGET_AGP,
                                             gpuobj);
                if (o_ret)
                        *o_ret = 0;
        } else
        if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA) {
                nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma, gpuobj);
                if (offset & ~0xffffffffULL) {
                        NV_ERROR(dev, "obj offset exceeds 32-bits\n");
                        return -EINVAL;
                }
                if (o_ret)
                        *o_ret = (uint32_t)offset;
                ret = (*gpuobj != NULL) ? 0 : -EINVAL;
        } else {
                NV_ERROR(dev, "Invalid GART type %d\n", dev_priv->gart_info.type);
                return -EINVAL;
        }

        return ret;
}

/* Context objects in the instance RAM have the following structure.
 * On NV40 they are 32 bytes long, on NV30 and smaller 16 bytes.

   NV4 - NV30:

   entry[0]
	11:0 class
	12   chroma key enable
	13   user clip enable
	14   swizzle enable
	17:15 patch config:
		scrcopy_and, rop_and, blend_and, scrcopy, srccopy_pre, blend_pre
	18   synchronize enable
	19   endian: 1 big, 0 little
	21:20 dither mode
	23   single step enable
	24   patch status: 0 invalid, 1 valid
	25   context_surface 0: 1 valid
	26   context surface 1: 1 valid
	27   context pattern: 1 valid
	28   context rop: 1 valid
	29,30 context beta, beta4
   entry[1]
	7:0   mono format
	15:8  color format
	31:16 notify instance address
   entry[2]
	15:0  dma 0 instance address
	31:16 dma 1 instance address
   entry[3]
	dma method traps

   NV40:
   No idea what the exact format is. Here's what can be deduced:

   entry[0]:
	11:0  class  (maybe uses more bits here?)
	17    user clip enable
	21:19 patch config
	25    patch status valid ?
   entry[1]:
	15:0  DMA notifier  (maybe 20:0)
   entry[2]:
	15:0  DMA 0 instance (maybe 20:0)
	24    big endian
   entry[3]:
	15:0  DMA 1 instance (maybe 20:0)
   entry[4]:
   entry[5]:
	set to 0?
*/
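
/*
 * For illustration: nouveau_gpuobj_gr_new() below fills in only a few of
 * these fields.  On a big-endian pre-NV40 board, an object of class 0x0039
 * would get entry[0] = 0x0039 | 0x00080000 = 0x00080039, i.e. the class in
 * bits 11:0 plus the big-endian flag (bit 19) from the NV4-NV30 layout
 * above.
 */
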
static int
nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class,
                      struct nouveau_gpuobj **gpuobj_ret)
{
        struct drm_nouveau_private *dev_priv;
        struct nouveau_gpuobj *gpuobj;

        if (!chan || !gpuobj_ret || *gpuobj_ret != NULL)
                return -EINVAL;
        dev_priv = chan->dev->dev_private;

        gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
        if (!gpuobj)
                return -ENOMEM;
        gpuobj->dev = chan->dev;
        gpuobj->engine = NVOBJ_ENGINE_SW;
        gpuobj->class = class;
        kref_init(&gpuobj->refcount);
        gpuobj->cinst = 0x40;

        spin_lock(&dev_priv->ramin_lock);
        list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
        spin_unlock(&dev_priv->ramin_lock);
        *gpuobj_ret = gpuobj;
        return 0;
}

int
nouveau_gpuobj_gr_new(struct nouveau_channel *chan, int class,
                      struct nouveau_gpuobj **gpuobj)
{
        struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
        struct drm_device *dev = chan->dev;
        struct nouveau_gpuobj_class *oc;
        int ret;

        NV_DEBUG(dev, "ch%d class=0x%04x\n", chan->id, class);

        list_for_each_entry(oc, &dev_priv->classes, head) {
                if (oc->id == class)
                        goto found;
        }

        NV_ERROR(dev, "illegal object class: 0x%x\n", class);
        return -EINVAL;

found:
        if (oc->engine == NVOBJ_ENGINE_SW)
                return nouveau_gpuobj_sw_new(chan, class, gpuobj);

        switch (oc->engine) {
        case NVOBJ_ENGINE_GR:
                if (dev_priv->card_type >= NV_50 && !chan->ramin_grctx) {
                        struct nouveau_pgraph_engine *pgraph =
                                &dev_priv->engine.graph;

                        ret = pgraph->create_context(chan);
                        if (ret)
                                return ret;
                }
                break;
        case NVOBJ_ENGINE_CRYPT:
                if (!chan->crypt_ctx) {
                        struct nouveau_crypt_engine *pcrypt =
                                &dev_priv->engine.crypt;

                        ret = pcrypt->create_context(chan);
                        if (ret)
                                return ret;
                }
                break;
        }

        ret = nouveau_gpuobj_new(dev, chan,
                                 nouveau_gpuobj_class_instmem_size(dev, class),
                                 16,
                                 NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE,
                                 gpuobj);
        if (ret) {
                NV_ERROR(dev, "error creating gpuobj: %d\n", ret);
                return ret;
        }

        if (dev_priv->card_type >= NV_50) {
                nv_wo32(*gpuobj,  0, class);
                nv_wo32(*gpuobj, 20, 0x00010000);
        } else {
                switch (class) {
                case NV_CLASS_NULL:
                        nv_wo32(*gpuobj, 0, 0x00001030);
                        nv_wo32(*gpuobj, 4, 0xFFFFFFFF);
                        break;
                default:
                        if (dev_priv->card_type >= NV_40) {
                                nv_wo32(*gpuobj, 0, class);
#ifdef __BIG_ENDIAN
                                nv_wo32(*gpuobj, 8, 0x01000000);
#endif
                        } else {
#ifdef __BIG_ENDIAN
                                nv_wo32(*gpuobj, 0, class | 0x00080000);
#else
                                nv_wo32(*gpuobj, 0, class);
#endif
                        }
                }
        }
        dev_priv->engine.instmem.flush(dev);

        (*gpuobj)->engine = oc->engine;
        (*gpuobj)->class  = oc->id;
        return 0;
}

static int
nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
{
        struct drm_device *dev = chan->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        uint32_t size;
        uint32_t base;
        int ret;

        NV_DEBUG(dev, "ch%d\n", chan->id);

        /* Base amount for object storage (4KiB enough?) */
        size = 0x2000;
        base = 0;

        /* PGRAPH context */
        size += dev_priv->engine.graph.grctx_size;

        if (dev_priv->card_type == NV_50) {
                /* Various fixed table thingos */
                size += 0x1400; /* mostly unknown stuff */
                size += 0x4000; /* vm pd */
                base  = 0x6000;
                /* RAMHT, not sure about setting size yet, 32KiB to be safe */
                size += 0x8000;
                /* RAMFC */
                size += 0x1000;
        }

        ret = nouveau_gpuobj_new(dev, NULL, size, 0x1000, 0, &chan->ramin);
        if (ret) {
                NV_ERROR(dev, "Error allocating channel PRAMIN: %d\n", ret);
                return ret;
        }

        ret = drm_mm_init(&chan->ramin_heap, base, size);
        if (ret) {
                NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret);
                nouveau_gpuobj_ref(NULL, &chan->ramin);
                return ret;
        }

        return 0;
}

int
nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
                            uint32_t vram_h, uint32_t tt_h)
{
        struct drm_device *dev = chan->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
        struct nouveau_gpuobj *vram = NULL, *tt = NULL;
        int ret, i;

        NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);

        /* Allocate a chunk of memory for per-channel object storage */
        ret = nouveau_gpuobj_channel_init_pramin(chan);
        if (ret) {
                NV_ERROR(dev, "init pramin\n");
                return ret;
        }

        /* NV50 VM
         *  - Allocate per-channel page-directory
         *  - Map GART and VRAM into the channel's address space at the
         *    locations determined during init.
         */
        if (dev_priv->card_type >= NV_50) {
                u32 pgd_offs = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200;
                u64 vm_vinst = chan->ramin->vinst + pgd_offs;
                u32 vm_pinst = chan->ramin->pinst;
                u32 pde;

                if (vm_pinst != ~0)
                        vm_pinst += pgd_offs;

                ret = nouveau_gpuobj_new_fake(dev, vm_pinst, vm_vinst, 0x4000,
                                              0, &chan->vm_pd);
                if (ret)
                        return ret;
                for (i = 0; i < 0x4000; i += 8) {
                        nv_wo32(chan->vm_pd, i + 0, 0x00000000);
                        nv_wo32(chan->vm_pd, i + 4, 0xdeadcafe);
                }

                nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma,
                                   &chan->vm_gart_pt);
                pde = (dev_priv->vm_gart_base / (512*1024*1024)) * 8;
                nv_wo32(chan->vm_pd, pde + 0, chan->vm_gart_pt->vinst | 3);
                nv_wo32(chan->vm_pd, pde + 4, 0x00000000);

                pde = (dev_priv->vm_vram_base / (512*1024*1024)) * 8;
                for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) {
                        nouveau_gpuobj_ref(dev_priv->vm_vram_pt[i],
                                           &chan->vm_vram_pt[i]);

                        nv_wo32(chan->vm_pd, pde + 0,
                                chan->vm_vram_pt[i]->vinst | 0x61);
                        nv_wo32(chan->vm_pd, pde + 4, 0x00000000);
                        pde += 8;
                }

                instmem->flush(dev);
        }

        /* RAMHT */
        if (dev_priv->card_type < NV_50) {
                nouveau_ramht_ref(dev_priv->ramht, &chan->ramht, NULL);
        } else {
                struct nouveau_gpuobj *ramht = NULL;

                ret = nouveau_gpuobj_new(dev, chan, 0x8000, 16,
                                         NVOBJ_FLAG_ZERO_ALLOC, &ramht);
                if (ret)
                        return ret;

                ret = nouveau_ramht_new(dev, ramht, &chan->ramht);
                nouveau_gpuobj_ref(NULL, &ramht);
                if (ret)
                        return ret;
        }

        /* VRAM ctxdma */
        if (dev_priv->card_type >= NV_50) {
                ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
                                             0, dev_priv->vm_end,
                                             NV_DMA_ACCESS_RW,
                                             NV_DMA_TARGET_AGP, &vram);
                if (ret) {
                        NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
                        return ret;
                }
        } else {
                ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
                                             0, dev_priv->fb_available_size,
                                             NV_DMA_ACCESS_RW,
                                             NV_DMA_TARGET_VIDMEM, &vram);
                if (ret) {
                        NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
                        return ret;
                }
        }

        ret = nouveau_ramht_insert(chan, vram_h, vram);
        nouveau_gpuobj_ref(NULL, &vram);
        if (ret) {
                NV_ERROR(dev, "Error adding VRAM ctxdma to RAMHT: %d\n", ret);
                return ret;
        }

        /* TT memory ctxdma */
        if (dev_priv->card_type >= NV_50) {
                ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
                                             0, dev_priv->vm_end,
                                             NV_DMA_ACCESS_RW,
                                             NV_DMA_TARGET_AGP, &tt);
                if (ret) {
                        NV_ERROR(dev, "Error creating TT ctxdma: %d\n", ret);
                        return ret;
                }
        } else
        if (dev_priv->gart_info.type != NOUVEAU_GART_NONE) {
                ret = nouveau_gpuobj_gart_dma_new(chan, 0,
                                                  dev_priv->gart_info.aper_size,
                                                  NV_DMA_ACCESS_RW, &tt, NULL);
        } else {
                NV_ERROR(dev, "Invalid GART type %d\n", dev_priv->gart_info.type);
                ret = -EINVAL;
        }

        if (ret) {
                NV_ERROR(dev, "Error creating TT ctxdma: %d\n", ret);
                return ret;
        }

        ret = nouveau_ramht_insert(chan, tt_h, tt);
        nouveau_gpuobj_ref(NULL, &tt);
        if (ret) {
                NV_ERROR(dev, "Error adding TT ctxdma to RAMHT: %d\n", ret);
                return ret;
        }

        return 0;
}

void
nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
{
        struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
        struct drm_device *dev = chan->dev;
        int i;

        NV_DEBUG(dev, "ch%d\n", chan->id);

        if (!chan->ramht)
                return;

        nouveau_ramht_ref(NULL, &chan->ramht, chan);

        nouveau_gpuobj_ref(NULL, &chan->vm_pd);
        nouveau_gpuobj_ref(NULL, &chan->vm_gart_pt);
        for (i = 0; i < dev_priv->vm_vram_pt_nr; i++)
                nouveau_gpuobj_ref(NULL, &chan->vm_vram_pt[i]);

        if (chan->ramin_heap.free_stack.next)
                drm_mm_takedown(&chan->ramin_heap);
        nouveau_gpuobj_ref(NULL, &chan->ramin);
}

int
nouveau_gpuobj_suspend(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *gpuobj;
        int i;

        list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
                if (gpuobj->cinst != NVOBJ_CINST_GLOBAL)
                        continue;

                gpuobj->suspend = vmalloc(gpuobj->size);
                if (!gpuobj->suspend) {
                        nouveau_gpuobj_resume(dev);
                        return -ENOMEM;
                }

                for (i = 0; i < gpuobj->size; i += 4)
                        gpuobj->suspend[i/4] = nv_ro32(gpuobj, i);
        }

        return 0;
}

void
nouveau_gpuobj_resume(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *gpuobj;
        int i;

        list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
                if (!gpuobj->suspend)
                        continue;

                for (i = 0; i < gpuobj->size; i += 4)
                        nv_wo32(gpuobj, i, gpuobj->suspend[i/4]);

                vfree(gpuobj->suspend);
                gpuobj->suspend = NULL;
        }

        dev_priv->engine.instmem.flush(dev);
}

int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data,
                              struct drm_file *file_priv)
{
        struct drm_nouveau_grobj_alloc *init = data;
        struct nouveau_gpuobj *gr = NULL;
        struct nouveau_channel *chan;
        int ret;

        if (init->handle == ~0)
                return -EINVAL;

        chan = nouveau_channel_get(dev, file_priv, init->channel);
        if (IS_ERR(chan))
                return PTR_ERR(chan);

        if (nouveau_ramht_find(chan, init->handle)) {
                ret = -EEXIST;
                goto out;
        }

        ret = nouveau_gpuobj_gr_new(chan, init->class, &gr);
        if (ret) {
                NV_ERROR(dev, "Error creating object: %d (%d/0x%08x)\n",
                         ret, init->channel, init->handle);
                goto out;
        }

        ret = nouveau_ramht_insert(chan, init->handle, gr);
        nouveau_gpuobj_ref(NULL, &gr);
        if (ret) {
                NV_ERROR(dev, "Error referencing object: %d (%d/0x%08x)\n",
                         ret, init->channel, init->handle);
        }

out:
        nouveau_channel_put(&chan);
        return ret;
}

int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data,
                              struct drm_file *file_priv)
{
        struct drm_nouveau_gpuobj_free *objfree = data;
        struct nouveau_channel *chan;
        int ret;

        chan = nouveau_channel_get(dev, file_priv, objfree->channel);
        if (IS_ERR(chan))
                return PTR_ERR(chan);

        ret = nouveau_ramht_remove(chan, objfree->handle);
        nouveau_channel_put(&chan);
        return ret;
}

u32
nv_ro32(struct nouveau_gpuobj *gpuobj, u32 offset)
{
        struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
        struct drm_device *dev = gpuobj->dev;

        if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) {
                u64 ptr = gpuobj->vinst + offset;
                u32 base = ptr >> 16;
                u32 val;

                spin_lock(&dev_priv->ramin_lock);
                if (dev_priv->ramin_base != base) {
                        dev_priv->ramin_base = base;
                        nv_wr32(dev, 0x001700, dev_priv->ramin_base);
                }
                val = nv_rd32(dev, 0x700000 + (ptr & 0xffff));
                spin_unlock(&dev_priv->ramin_lock);
                return val;
        }

        return nv_ri32(dev, gpuobj->pinst + offset);
}

void
nv_wo32(struct nouveau_gpuobj *gpuobj, u32 offset, u32 val)
{
        struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
        struct drm_device *dev = gpuobj->dev;

        if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) {
                u64 ptr = gpuobj->vinst + offset;
                u32 base = ptr >> 16;

                spin_lock(&dev_priv->ramin_lock);
                if (dev_priv->ramin_base != base) {
                        dev_priv->ramin_base = base;
                        nv_wr32(dev, 0x001700, dev_priv->ramin_base);
                }
                nv_wr32(dev, 0x700000 + (ptr & 0xffff), val);
                spin_unlock(&dev_priv->ramin_lock);
                return;
        }

        nv_wi32(dev, gpuobj->pinst + offset, val);
}
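
/*
 * Both accessors above fall back to a sliding window when an object has no
 * usable PRAMIN mapping (pinst == ~0, or instmem not yet available): the
 * 64KiB window at 0x700000 is pointed at the object's vinst by programming
 * the 64KiB-aligned base into 0x001700, under ramin_lock since the window
 * is global to the device.  Callers typically batch nv_wo32() writes and
 * then flush with dev_priv->engine.instmem.flush(dev), as the functions in
 * this file do.
 */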