/*
 * Copyright (C) 2006 Ben Skeggs.
 *
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

/*
 * Authors:
 *   Ben Skeggs <darktama@iinet.net.au>
 */

#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nouveau_ramht.h"

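/*
 * Software object class/method registry.  Engine code registers the
 * object classes it implements with nouveau_gpuobj_class_new(), and can
 * attach handlers for individual methods with nouveau_gpuobj_mthd_new();
 * these tables are used to dispatch software methods trapped by the
 * PFIFO interrupt handler via nouveau_gpuobj_mthd_call2().
 */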
struct nouveau_gpuobj_method {
	struct list_head head;
	u32 mthd;
	int (*exec)(struct nouveau_channel *, u32 class, u32 mthd, u32 data);
};

struct nouveau_gpuobj_class {
	struct list_head head;
	struct list_head methods;
	u32 id;
	u32 engine;
};

int
nouveau_gpuobj_class_new(struct drm_device *dev, u32 class, u32 engine)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj_class *oc;

	oc = kzalloc(sizeof(*oc), GFP_KERNEL);
	if (!oc)
		return -ENOMEM;

	INIT_LIST_HEAD(&oc->methods);
	oc->id = class;
	oc->engine = engine;
	list_add(&oc->head, &dev_priv->classes);
	return 0;
}

int
nouveau_gpuobj_mthd_new(struct drm_device *dev, u32 class, u32 mthd,
			int (*exec)(struct nouveau_channel *, u32, u32, u32))
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj_method *om;
	struct nouveau_gpuobj_class *oc;

	list_for_each_entry(oc, &dev_priv->classes, head) {
		if (oc->id == class)
			goto found;
	}

	return -EINVAL;

found:
	om = kzalloc(sizeof(*om), GFP_KERNEL);
	if (!om)
		return -ENOMEM;

	om->mthd = mthd;
	om->exec = exec;
	list_add(&om->head, &oc->methods);
	return 0;
}

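/*
 * Look up and execute the registered handler for a (class, method) pair
 * on the given channel.  Returns -ENOENT if no handler exists.
 */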
int
nouveau_gpuobj_mthd_call(struct nouveau_channel *chan,
			 u32 class, u32 mthd, u32 data)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct nouveau_gpuobj_method *om;
	struct nouveau_gpuobj_class *oc;

	list_for_each_entry(oc, &dev_priv->classes, head) {
		if (oc->id != class)
			continue;

		list_for_each_entry(om, &oc->methods, head) {
			if (om->mthd == mthd)
				return om->exec(chan, class, mthd, data);
		}
	}

	return -ENOENT;
}

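/*
 * As above, but with only a channel id in hand; used from interrupt
 * context, where holding the channel list lock across the call keeps
 * the channel from being destroyed while the method executes.
 */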
int
nouveau_gpuobj_mthd_call2(struct drm_device *dev, int chid,
			  u32 class, u32 mthd, u32 data)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan = NULL;
	unsigned long flags;
	int ret = -EINVAL;

	spin_lock_irqsave(&dev_priv->channels.lock, flags);
	if (chid > 0 && chid < dev_priv->engine.fifo.channels)
		chan = dev_priv->channels.ptr[chid];
	if (chan)
		ret = nouveau_gpuobj_mthd_call(chan, class, mthd, data);
	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
	return ret;
}

/* NVidia uses context objects to drive drawing operations.

   Context objects can be selected into 8 subchannels in the FIFO,
   and then used via DMA command buffers.

   A context object is referenced by a user defined handle (CARD32). The HW
   looks up graphics objects in a hash table in the instance RAM.

   An entry in the hash table consists of 2 CARD32s. The first CARD32
   contains the handle, the second one a bitfield that contains the address
   of the object in instance RAM.

   The format of the second CARD32 seems to be:

   NV4 to NV30:

   15: 0 instance_addr >> 4
   17:16 engine (here uses 1 = graphics)
   28:24 channel id (here uses 0)
   31    valid (use 1)

   NV40:

   15: 0 instance_addr >> 4 (maybe 19-0)
   21:20 engine (here uses 1 = graphics)
   I'm unsure about the other bits, but using 0 seems to work.

   The key into the hash table depends on the object handle and channel id;
   see nouveau_ramht_hash_handle() in nouveau_ramht.c for the exact function.
*/
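/*
 * For illustration only: following the NV4-NV30 layout above, the second
 * hash-table word for a graphics object in channel 0 would be packed as
 *
 *	ctx = (instance_addr >> 4) | (1 << 16) | (0 << 24) | (1 << 31);
 *
 * The actual packing is done by the RAMHT code in nouveau_ramht.c.
 */
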
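/*
 * Allocate a gpuobj of @size bytes.  With a channel, the object is
 * suballocated from the channel's private PRAMIN heap; without one, it
 * gets backing pages from the global instmem engine and, if possible, a
 * slot in the PRAMIN aperture.  On success the object's pinst (PRAMIN
 * BAR offset), cinst (channel-relative offset) and vinst (VRAM address)
 * are filled in as appropriate.
 */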
int
nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
		   uint32_t size, int align, uint32_t flags,
		   struct nouveau_gpuobj **gpuobj_ret)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine = &dev_priv->engine;
	struct nouveau_gpuobj *gpuobj;
	struct drm_mm_node *ramin = NULL;
	int ret;

	NV_DEBUG(dev, "ch%d size=%u align=%d flags=0x%08x\n",
		 chan ? chan->id : -1, size, align, flags);

	if (!dev_priv || !gpuobj_ret || *gpuobj_ret != NULL)
		return -EINVAL;

	gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
	if (!gpuobj)
		return -ENOMEM;
	NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
	gpuobj->dev = dev;
	gpuobj->flags = flags;
	kref_init(&gpuobj->refcount);
	gpuobj->size = size;

	spin_lock(&dev_priv->ramin_lock);
	list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
	spin_unlock(&dev_priv->ramin_lock);

	if (chan) {
		NV_DEBUG(dev, "channel heap\n");

		ramin = drm_mm_search_free(&chan->ramin_heap, size, align, 0);
		if (ramin)
			ramin = drm_mm_get_block(ramin, size, align);

		if (!ramin) {
			nouveau_gpuobj_ref(NULL, &gpuobj);
			return -ENOMEM;
		}
	} else {
		NV_DEBUG(dev, "global heap\n");

		/* allocate backing pages, sets vinst */
		ret = engine->instmem.populate(dev, gpuobj, &size, align);
		if (ret) {
			nouveau_gpuobj_ref(NULL, &gpuobj);
			return ret;
		}

		/* try and get aperture space */
		do {
			if (drm_mm_pre_get(&dev_priv->ramin_heap))
				return -ENOMEM;

			spin_lock(&dev_priv->ramin_lock);
			ramin = drm_mm_search_free(&dev_priv->ramin_heap, size,
						   align, 0);
			if (ramin == NULL) {
				spin_unlock(&dev_priv->ramin_lock);
				nouveau_gpuobj_ref(NULL, &gpuobj);
				return -ENOMEM;
			}

			ramin = drm_mm_get_block_atomic(ramin, size, align);
			spin_unlock(&dev_priv->ramin_lock);
		} while (ramin == NULL);

		/* on nv50 it's ok to fail, we have a fallback path */
		if (!ramin && dev_priv->card_type < NV_50) {
			nouveau_gpuobj_ref(NULL, &gpuobj);
			return -ENOMEM;
		}
	}

	/* if we got a chunk of the aperture, map pages into it */
	gpuobj->im_pramin = ramin;
	if (!chan && gpuobj->im_pramin && dev_priv->ramin_available) {
		ret = engine->instmem.bind(dev, gpuobj);
		if (ret) {
			nouveau_gpuobj_ref(NULL, &gpuobj);
			return ret;
		}
	}

	/* calculate the various different addresses for the object */
	if (chan) {
		gpuobj->pinst = chan->ramin->pinst;
		if (gpuobj->pinst != ~0)
			gpuobj->pinst += gpuobj->im_pramin->start;

		if (dev_priv->card_type < NV_50) {
			gpuobj->cinst = gpuobj->pinst;
		} else {
			gpuobj->cinst = gpuobj->im_pramin->start;
			gpuobj->vinst = gpuobj->im_pramin->start +
					chan->ramin->vinst;
		}
	} else {
		if (gpuobj->im_pramin)
			gpuobj->pinst = gpuobj->im_pramin->start;
		else
			gpuobj->pinst = ~0;
		gpuobj->cinst = 0xdeadbeef;
	}

	if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
		int i;

		for (i = 0; i < gpuobj->size; i += 4)
			nv_wo32(gpuobj, i, 0);
		engine->instmem.flush(dev);
	}

	*gpuobj_ret = gpuobj;
	return 0;
}

int
nouveau_gpuobj_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	NV_DEBUG(dev, "\n");

	INIT_LIST_HEAD(&dev_priv->gpuobj_list);
	INIT_LIST_HEAD(&dev_priv->classes);
	spin_lock_init(&dev_priv->ramin_lock);
	dev_priv->ramin_base = ~0;

	return 0;
}

void
nouveau_gpuobj_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj_method *om, *tm;
	struct nouveau_gpuobj_class *oc, *tc;

	NV_DEBUG(dev, "\n");

	list_for_each_entry_safe(oc, tc, &dev_priv->classes, head) {
		list_for_each_entry_safe(om, tm, &oc->methods, head) {
			list_del(&om->head);
			kfree(om);
		}
		list_del(&oc->head);
		kfree(oc);
	}

	BUG_ON(!list_empty(&dev_priv->gpuobj_list));
}

static void
nouveau_gpuobj_del(struct kref *ref)
{
	struct nouveau_gpuobj *gpuobj =
		container_of(ref, struct nouveau_gpuobj, refcount);
	struct drm_device *dev = gpuobj->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine = &dev_priv->engine;
	int i;

	NV_DEBUG(dev, "gpuobj %p\n", gpuobj);

	if (gpuobj->im_pramin && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) {
		for (i = 0; i < gpuobj->size; i += 4)
			nv_wo32(gpuobj, i, 0);
		engine->instmem.flush(dev);
	}

	if (gpuobj->dtor)
		gpuobj->dtor(dev, gpuobj);

	if (gpuobj->im_backing)
		engine->instmem.clear(dev, gpuobj);

	spin_lock(&dev_priv->ramin_lock);
	if (gpuobj->im_pramin)
		drm_mm_put_block(gpuobj->im_pramin);
	list_del(&gpuobj->list);
	spin_unlock(&dev_priv->ramin_lock);

	kfree(gpuobj);
}

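/*
 * Reference management: nouveau_gpuobj_ref(obj, &ptr) takes a reference
 * on @obj (when non-NULL), drops the reference previously held through
 * *ptr (when non-NULL, possibly freeing that object) and stores @obj in
 * *ptr.  Calling nouveau_gpuobj_ref(NULL, &ptr) is therefore a plain
 * unreference.
 */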
void
nouveau_gpuobj_ref(struct nouveau_gpuobj *ref, struct nouveau_gpuobj **ptr)
{
	if (ref)
		kref_get(&ref->refcount);

	if (*ptr)
		kref_put(&(*ptr)->refcount, nouveau_gpuobj_del);

	*ptr = ref;
}

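/*
 * Wrap an existing, externally managed piece of instance memory (given
 * by fixed pinst/vinst addresses) in a gpuobj; used early in init for
 * things that exist before the allocator does, such as RAMHT and the
 * NV50 page directory.
 */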
int
nouveau_gpuobj_new_fake(struct drm_device *dev, u32 pinst, u64 vinst,
			u32 size, u32 flags, struct nouveau_gpuobj **pgpuobj)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = NULL;
	int i;

	NV_DEBUG(dev,
		 "pinst=0x%08x vinst=0x%010llx size=0x%08x flags=0x%08x\n",
		 pinst, vinst, size, flags);

	gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
	if (!gpuobj)
		return -ENOMEM;
	NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
	gpuobj->dev = dev;
	gpuobj->flags = flags;
	kref_init(&gpuobj->refcount);
	gpuobj->size  = size;
	gpuobj->pinst = pinst;
	gpuobj->cinst = 0xdeadbeef;
	gpuobj->vinst = vinst;

	if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
		for (i = 0; i < gpuobj->size; i += 4)
			nv_wo32(gpuobj, i, 0);
		dev_priv->engine.instmem.flush(dev);
	}

	spin_lock(&dev_priv->ramin_lock);
	list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
	spin_unlock(&dev_priv->ramin_lock);
	*pgpuobj = gpuobj;
	return 0;
}

static uint32_t
nouveau_gpuobj_class_instmem_size(struct drm_device *dev, int class)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	/*XXX: dodgy hack for now */
	if (dev_priv->card_type >= NV_50)
		return 24;
	if (dev_priv->card_type >= NV_40)
		return 32;
	return 16;
}

/*
   DMA objects are used to reference a piece of memory in the
   framebuffer, PCI or AGP address space. Each object is 16 bytes big
   and looks as follows:

   entry[0]
   11:0  class (seems like I can always use 0 here)
   12    page table present?
   13    page entry linear?
   15:14 access: 0 rw, 1 ro, 2 wo
   17:16 target: 0 NV memory, 1 NV memory tiled, 2 PCI, 3 AGP
   31:20 dma adjust (bits 0-11 of the address)
   entry[1]
   dma limit (size of transfer)
   entry[X]
   1     0 readonly, 1 readwrite
   31:12 dma frame address of the page (bits 12-31 of the address)
   entry[N]
   page table terminator; nvidia uses the same value as the first pte,
   rivatv uses 0xffffffff

   Non-linear page tables need a list of frame addresses afterwards;
   the rivatv project has some info on this.

   The method below creates a DMA object in instance RAM and returns a handle
   to it that can be used to set up context objects.
*/
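/*
 * For illustration only: a read/write ctxdma covering all of VRAM, as
 * the channel-init code below creates it, would be set up with
 *
 *	ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
 *				     dev_priv->fb_available_size,
 *				     NV_DMA_ACCESS_RW, NV_DMA_TARGET_VIDMEM,
 *				     &vram);
 */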
int
nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class,
		       uint64_t offset, uint64_t size, int access,
		       int target, struct nouveau_gpuobj **gpuobj)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
	int ret;

	NV_DEBUG(dev, "ch%d class=0x%04x offset=0x%llx size=0x%llx\n",
		 chan->id, class, offset, size);
	NV_DEBUG(dev, "access=%d target=%d\n", access, target);

	switch (target) {
	case NV_DMA_TARGET_AGP:
		offset += dev_priv->gart_info.aper_base;
		break;
	default:
		break;
	}

	ret = nouveau_gpuobj_new(dev, chan,
				 nouveau_gpuobj_class_instmem_size(dev, class),
				 16, NVOBJ_FLAG_ZERO_ALLOC |
				 NVOBJ_FLAG_ZERO_FREE, gpuobj);
	if (ret) {
		NV_ERROR(dev, "Error creating gpuobj: %d\n", ret);
		return ret;
	}

	if (dev_priv->card_type < NV_50) {
		uint32_t frame, adjust, pte_flags = 0;

		if (access != NV_DMA_ACCESS_RO)
			pte_flags |= (1<<1);
		adjust = offset &  0x00000fff;
		frame  = offset & ~0x00000fff;

		nv_wo32(*gpuobj,  0, ((1<<12) | (1<<13) | (adjust << 20) |
				      (access << 14) | (target << 16) |
				      class));
		nv_wo32(*gpuobj,  4, size - 1);
		nv_wo32(*gpuobj,  8, frame | pte_flags);
		nv_wo32(*gpuobj, 12, frame | pte_flags);
	} else {
		uint64_t limit = offset + size - 1;
		uint32_t flags0, flags5;

		if (target == NV_DMA_TARGET_VIDMEM) {
			flags0 = 0x00190000;
			flags5 = 0x00010000;
		} else {
			flags0 = 0x7fc00000;
			flags5 = 0x00080000;
		}

		nv_wo32(*gpuobj,  0, flags0 | class);
		nv_wo32(*gpuobj,  4, lower_32_bits(limit));
		nv_wo32(*gpuobj,  8, lower_32_bits(offset));
		nv_wo32(*gpuobj, 12, ((upper_32_bits(limit) & 0xff) << 24) |
				      (upper_32_bits(offset) & 0xff));
		nv_wo32(*gpuobj, 20, flags5);
	}

	instmem->flush(dev);

	(*gpuobj)->engine = NVOBJ_ENGINE_SW;
	(*gpuobj)->class  = class;
	return 0;
}

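/*
 * Create a ctxdma covering the GART aperture.  For real AGP (and for
 * the NV50-style SGDMA path) a new object is created; for plain SGDMA
 * the pre-built global sg_ctxdma is referenced instead, and *o_ret
 * returns the offset of the mapping within it.
 */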
int
nouveau_gpuobj_gart_dma_new(struct nouveau_channel *chan,
			    uint64_t offset, uint64_t size, int access,
			    struct nouveau_gpuobj **gpuobj,
			    uint32_t *o_ret)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int ret;

	if (dev_priv->gart_info.type == NOUVEAU_GART_AGP ||
	    (dev_priv->card_type >= NV_50 &&
	     dev_priv->gart_info.type == NOUVEAU_GART_SGDMA)) {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     offset + dev_priv->vm_gart_base,
					     size, access, NV_DMA_TARGET_AGP,
					     gpuobj);
		if (o_ret)
			*o_ret = 0;
	} else
	if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA) {
		nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma, gpuobj);
		if (offset & ~0xffffffffULL) {
			NV_ERROR(dev, "obj offset exceeds 32-bits\n");
			return -EINVAL;
		}
		if (o_ret)
			*o_ret = (uint32_t)offset;
		ret = (*gpuobj != NULL) ? 0 : -EINVAL;
	} else {
		NV_ERROR(dev, "Invalid GART type %d\n", dev_priv->gart_info.type);
		return -EINVAL;
	}

	return ret;
}

/* Context objects in the instance RAM have the following structure.
 * On NV40 they are 32 bytes long, on NV30 and smaller 16 bytes.

   NV4 - NV30:

   entry[0]
   11:0  class
   12    chroma key enable
   13    user clip enable
   14    swizzle enable
   17:15 patch config:
         scrcopy_and, rop_and, blend_and, scrcopy, srccopy_pre, blend_pre
   18    synchronize enable
   19    endian: 1 big, 0 little
   21:20 dither mode
   23    single step enable
   24    patch status: 0 invalid, 1 valid
   25    context_surface 0: 1 valid
   26    context surface 1: 1 valid
   27    context pattern: 1 valid
   28    context rop: 1 valid
   29,30 context beta, beta4
   entry[1]
   7:0   mono format
   15:8  color format
   31:16 notify instance address
   entry[2]
   15:0  dma 0 instance address
   31:16 dma 1 instance address
   entry[3]
   dma method traps

   NV40:
   No idea what the exact format is. Here's what can be deduced:

   entry[0]:
   11:0  class  (maybe uses more bits here?)
   17    user clip enable
   21:19 patch config
   25    patch status valid ?
   entry[1]:
   15:0  DMA notifier  (maybe 20:0)
   entry[2]:
   15:0  DMA 0 instance  (maybe 20:0)
   24    big endian
   entry[3]:
   15:0  DMA 1 instance  (maybe 20:0)
   entry[4]:
   entry[5]:
   set to 0?
*/
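/*
 * Software objects never touch the hardware: they exist only so that
 * methods on them can be intercepted and emulated by the driver.  The
 * gpuobj created here is pure bookkeeping, with no instance memory
 * behind it.
 */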
static int
nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class,
		      struct nouveau_gpuobj **gpuobj_ret)
{
	struct drm_nouveau_private *dev_priv;
	struct nouveau_gpuobj *gpuobj;

	if (!chan || !gpuobj_ret || *gpuobj_ret != NULL)
		return -EINVAL;
	dev_priv = chan->dev->dev_private;

	gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
	if (!gpuobj)
		return -ENOMEM;
	gpuobj->dev = chan->dev;
	gpuobj->engine = NVOBJ_ENGINE_SW;
	gpuobj->class = class;
	kref_init(&gpuobj->refcount);
	gpuobj->cinst = 0x40;

	spin_lock(&dev_priv->ramin_lock);
	list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
	spin_unlock(&dev_priv->ramin_lock);
	*gpuobj_ret = gpuobj;
	return 0;
}

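/*
 * Create an object of the given class for a channel.  The class must
 * have been registered beforehand; software classes are handed off to
 * nouveau_gpuobj_sw_new(), and for hardware engines the channel's
 * engine context is created first where needed.
 */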
int
nouveau_gpuobj_gr_new(struct nouveau_channel *chan, int class,
		      struct nouveau_gpuobj **gpuobj)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct drm_device *dev = chan->dev;
	struct nouveau_gpuobj_class *oc;
	int ret;

	NV_DEBUG(dev, "ch%d class=0x%04x\n", chan->id, class);

	list_for_each_entry(oc, &dev_priv->classes, head) {
		if (oc->id == class)
			goto found;
	}

	NV_ERROR(dev, "illegal object class: 0x%x\n", class);
	return -EINVAL;

found:
	if (oc->engine == NVOBJ_ENGINE_SW)
		return nouveau_gpuobj_sw_new(chan, class, gpuobj);

	switch (oc->engine) {
	case NVOBJ_ENGINE_GR:
		if (dev_priv->card_type >= NV_50 && !chan->ramin_grctx) {
			struct nouveau_pgraph_engine *pgraph =
				&dev_priv->engine.graph;

			ret = pgraph->create_context(chan);
			if (ret)
				return ret;
		}
		break;
	case NVOBJ_ENGINE_CRYPT:
		if (!chan->crypt_ctx) {
			struct nouveau_crypt_engine *pcrypt =
				&dev_priv->engine.crypt;

			ret = pcrypt->create_context(chan);
			if (ret)
				return ret;
		}
		break;
	}

	ret = nouveau_gpuobj_new(dev, chan,
				 nouveau_gpuobj_class_instmem_size(dev, class),
				 16,
				 NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE,
				 gpuobj);
	if (ret) {
		NV_ERROR(dev, "error creating gpuobj: %d\n", ret);
		return ret;
	}

	if (dev_priv->card_type >= NV_50) {
		nv_wo32(*gpuobj,  0, class);
		nv_wo32(*gpuobj, 20, 0x00010000);
	} else {
		switch (class) {
		case NV_CLASS_NULL:
			nv_wo32(*gpuobj, 0, 0x00001030);
			nv_wo32(*gpuobj, 4, 0xFFFFFFFF);
			break;
		default:
			if (dev_priv->card_type >= NV_40) {
				nv_wo32(*gpuobj, 0, class);
#ifdef __BIG_ENDIAN
				nv_wo32(*gpuobj, 8, 0x01000000);
#endif
			} else {
#ifdef __BIG_ENDIAN
				nv_wo32(*gpuobj, 0, class | 0x00080000);
#else
				nv_wo32(*gpuobj, 0, class);
#endif
			}
		}
	}
	dev_priv->engine.instmem.flush(dev);

	(*gpuobj)->engine = oc->engine;
	(*gpuobj)->class  = oc->id;
	return 0;
}

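/*
 * Allocate the chunk of PRAMIN that the channel's objects are
 * suballocated from; on NV50 this block also holds the page directory,
 * RAMHT and RAMFC.
 */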
static int
nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t size;
	uint32_t base;
	int ret;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	/* Base amount for object storage (4KiB enough?) */
	size = 0x2000;
	base = 0;

	/* PGRAPH context */
	size += dev_priv->engine.graph.grctx_size;

	if (dev_priv->card_type == NV_50) {
		/* Various fixed table thingos */
		size += 0x1400; /* mostly unknown stuff */
		size += 0x4000; /* vm pd */
		base  = 0x6000;
		/* RAMHT, not sure about setting size yet, 32KiB to be safe */
		size += 0x8000;
		/* RAMFC */
		size += 0x1000;
	}

	ret = nouveau_gpuobj_new(dev, NULL, size, 0x1000, 0, &chan->ramin);
	if (ret) {
		NV_ERROR(dev, "Error allocating channel PRAMIN: %d\n", ret);
		return ret;
	}

	ret = drm_mm_init(&chan->ramin_heap, base, size);
	if (ret) {
		NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret);
		nouveau_gpuobj_ref(NULL, &chan->ramin);
		return ret;
	}

	return 0;
}

int
nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
			    uint32_t vram_h, uint32_t tt_h)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
	struct nouveau_gpuobj *vram = NULL, *tt = NULL;
	int ret, i;

	NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);

	/* Allocate a chunk of memory for per-channel object storage */
	ret = nouveau_gpuobj_channel_init_pramin(chan);
	if (ret) {
		NV_ERROR(dev, "init pramin\n");
		return ret;
	}

	/* NV50 VM
	 *  - Allocate per-channel page-directory
	 *  - Map GART and VRAM into the channel's address space at the
	 *    locations determined during init.
	 */
	if (dev_priv->card_type >= NV_50) {
		u32 pgd_offs = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200;
		u64 vm_vinst = chan->ramin->vinst + pgd_offs;
		u32 vm_pinst = chan->ramin->pinst;
		u32 pde;

		if (vm_pinst != ~0)
			vm_pinst += pgd_offs;

		ret = nouveau_gpuobj_new_fake(dev, vm_pinst, vm_vinst, 0x4000,
					      0, &chan->vm_pd);
		if (ret)
			return ret;
		for (i = 0; i < 0x4000; i += 8) {
			nv_wo32(chan->vm_pd, i + 0, 0x00000000);
			nv_wo32(chan->vm_pd, i + 4, 0xdeadcafe);
		}

		nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma,
				   &chan->vm_gart_pt);
		pde = (dev_priv->vm_gart_base / (512*1024*1024)) * 8;
		nv_wo32(chan->vm_pd, pde + 0, chan->vm_gart_pt->vinst | 3);
		nv_wo32(chan->vm_pd, pde + 4, 0x00000000);

		pde = (dev_priv->vm_vram_base / (512*1024*1024)) * 8;
		for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) {
			nouveau_gpuobj_ref(dev_priv->vm_vram_pt[i],
					   &chan->vm_vram_pt[i]);

			nv_wo32(chan->vm_pd, pde + 0,
				chan->vm_vram_pt[i]->vinst | 0x61);
			nv_wo32(chan->vm_pd, pde + 4, 0x00000000);
			pde += 8;
		}

		instmem->flush(dev);
	}

	/* RAMHT */
	if (dev_priv->card_type < NV_50) {
		nouveau_ramht_ref(dev_priv->ramht, &chan->ramht, NULL);
	} else {
		struct nouveau_gpuobj *ramht = NULL;

		ret = nouveau_gpuobj_new(dev, chan, 0x8000, 16,
					 NVOBJ_FLAG_ZERO_ALLOC, &ramht);
		if (ret)
			return ret;

		ret = nouveau_ramht_new(dev, ramht, &chan->ramht);
		nouveau_gpuobj_ref(NULL, &ramht);
		if (ret)
			return ret;
	}

	/* VRAM ctxdma */
	if (dev_priv->card_type >= NV_50) {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     0, dev_priv->vm_end,
					     NV_DMA_ACCESS_RW,
					     NV_DMA_TARGET_AGP, &vram);
		if (ret) {
			NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
			return ret;
		}
	} else {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     0, dev_priv->fb_available_size,
					     NV_DMA_ACCESS_RW,
					     NV_DMA_TARGET_VIDMEM, &vram);
		if (ret) {
			NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
			return ret;
		}
	}

	ret = nouveau_ramht_insert(chan, vram_h, vram);
	nouveau_gpuobj_ref(NULL, &vram);
	if (ret) {
		NV_ERROR(dev, "Error adding VRAM ctxdma to RAMHT: %d\n", ret);
		return ret;
	}

	/* TT memory ctxdma */
	if (dev_priv->card_type >= NV_50) {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     0, dev_priv->vm_end,
					     NV_DMA_ACCESS_RW,
					     NV_DMA_TARGET_AGP, &tt);
		if (ret) {
			NV_ERROR(dev, "Error creating TT ctxdma: %d\n", ret);
			return ret;
		}
	} else
	if (dev_priv->gart_info.type != NOUVEAU_GART_NONE) {
		ret = nouveau_gpuobj_gart_dma_new(chan, 0,
						  dev_priv->gart_info.aper_size,
						  NV_DMA_ACCESS_RW, &tt, NULL);
	} else {
		NV_ERROR(dev, "Invalid GART type %d\n", dev_priv->gart_info.type);
		ret = -EINVAL;
	}

	if (ret) {
		NV_ERROR(dev, "Error creating TT ctxdma: %d\n", ret);
		return ret;
	}

	ret = nouveau_ramht_insert(chan, tt_h, tt);
	nouveau_gpuobj_ref(NULL, &tt);
	if (ret) {
		NV_ERROR(dev, "Error adding TT ctxdma to RAMHT: %d\n", ret);
		return ret;
	}

	return 0;
}

void
nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct drm_device *dev = chan->dev;
	int i;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	if (!chan->ramht)
		return;

	nouveau_ramht_ref(NULL, &chan->ramht, chan);

	nouveau_gpuobj_ref(NULL, &chan->vm_pd);
	nouveau_gpuobj_ref(NULL, &chan->vm_gart_pt);
	for (i = 0; i < dev_priv->vm_vram_pt_nr; i++)
		nouveau_gpuobj_ref(NULL, &chan->vm_vram_pt[i]);

	if (chan->ramin_heap.free_stack.next)
		drm_mm_takedown(&chan->ramin_heap);
	nouveau_gpuobj_ref(NULL, &chan->ramin);
}

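/*
 * Suspend support: instance memory contents may be lost across a
 * suspend/resume cycle, so snapshot every global object (those with
 * cinst == 0xdeadbeef, i.e. not suballocated from a channel) into a
 * vmalloc'd shadow copy here, and write the copies back in
 * nouveau_gpuobj_resume().
 */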
int
nouveau_gpuobj_suspend(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj;
	int i;

	list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
		if (gpuobj->cinst != 0xdeadbeef)
			continue;

		gpuobj->suspend = vmalloc(gpuobj->size);
		if (!gpuobj->suspend) {
			nouveau_gpuobj_resume(dev);
			return -ENOMEM;
		}

		for (i = 0; i < gpuobj->size; i += 4)
			gpuobj->suspend[i/4] = nv_ro32(gpuobj, i);
	}

	return 0;
}

void
nouveau_gpuobj_resume(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj;
	int i;

	list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
		if (!gpuobj->suspend)
			continue;

		for (i = 0; i < gpuobj->size; i += 4)
			nv_wo32(gpuobj, i, gpuobj->suspend[i/4]);

		vfree(gpuobj->suspend);
		gpuobj->suspend = NULL;
	}

	dev_priv->engine.instmem.flush(dev);
}

int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_nouveau_grobj_alloc *init = data;
	struct nouveau_gpuobj *gr = NULL;
	struct nouveau_channel *chan;
	int ret;

	if (init->handle == ~0)
		return -EINVAL;

	chan = nouveau_channel_get(dev, file_priv, init->channel);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	if (nouveau_ramht_find(chan, init->handle)) {
		ret = -EEXIST;
		goto out;
	}

	ret = nouveau_gpuobj_gr_new(chan, init->class, &gr);
	if (ret) {
		NV_ERROR(dev, "Error creating object: %d (%d/0x%08x)\n",
			 ret, init->channel, init->handle);
		goto out;
	}

	ret = nouveau_ramht_insert(chan, init->handle, gr);
	nouveau_gpuobj_ref(NULL, &gr);
	if (ret) {
		NV_ERROR(dev, "Error referencing object: %d (%d/0x%08x)\n",
			 ret, init->channel, init->handle);
	}

out:
	nouveau_channel_put(&chan);
	return ret;
}

int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_nouveau_gpuobj_free *objfree = data;
	struct nouveau_channel *chan;
	int ret;

	chan = nouveau_channel_get(dev, file_priv, objfree->channel);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	ret = nouveau_ramht_remove(chan, objfree->handle);
	nouveau_channel_put(&chan);
	return ret;
}

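/*
 * Read/write a word of an object's instance memory.  When the object
 * has a mapping in the PRAMIN BAR (pinst != ~0) and PRAMIN is up, the
 * access goes through that mapping; otherwise the 64KiB window at
 * 0x700000 is slid over the object's VRAM address via register 0x001700
 * and accessed directly, under the ramin lock.
 */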
u32
nv_ro32(struct nouveau_gpuobj *gpuobj, u32 offset)
{
	struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
	struct drm_device *dev = gpuobj->dev;

	if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) {
		u64  ptr = gpuobj->vinst + offset;
		u32 base = ptr >> 16;
		u32  val;

		spin_lock(&dev_priv->ramin_lock);
		if (dev_priv->ramin_base != base) {
			dev_priv->ramin_base = base;
			nv_wr32(dev, 0x001700, dev_priv->ramin_base);
		}
		val = nv_rd32(dev, 0x700000 + (ptr & 0xffff));
		spin_unlock(&dev_priv->ramin_lock);
		return val;
	}

	return nv_ri32(dev, gpuobj->pinst + offset);
}

void
nv_wo32(struct nouveau_gpuobj *gpuobj, u32 offset, u32 val)
{
	struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
	struct drm_device *dev = gpuobj->dev;

	if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) {
		u64  ptr = gpuobj->vinst + offset;
		u32 base = ptr >> 16;

		spin_lock(&dev_priv->ramin_lock);
		if (dev_priv->ramin_base != base) {
			dev_priv->ramin_base = base;
			nv_wr32(dev, 0x001700, dev_priv->ramin_base);
		}
		nv_wr32(dev, 0x700000 + (ptr & 0xffff), val);
		spin_unlock(&dev_priv->ramin_lock);
		return;
	}

	nv_wi32(dev, gpuobj->pinst + offset, val);
}