/*
 * Copyright 2007 Dave Airlied
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *	    Ben Skeggs <darktama@iinet.net.au>
 *	    Jeremy Kolb <jkolb@brandeis.edu>
 */

#include "drmP.h"

#include "nouveau_drm.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"

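/* TTM destroy callback: drop any kernel mapping, warn if the BO is still
 * attached to a GEM object, then unlink it from the per-device BO list
 * and free the nouveau_bo wrapper.
 */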
static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	ttm_bo_kunmap(&nvbo->kmap);

	if (unlikely(nvbo->gem))
		DRM_ERROR("bo %p still attached to GEM object\n", bo);

	spin_lock(&dev_priv->ttm.bo_list_lock);
	list_del(&nvbo->head);
	spin_unlock(&dev_priv->ttm.bo_list_lock);
	kfree(nvbo);
}

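/* Allocate and initialise a new buffer object. "size" and "align" are
 * given in bytes; the alignment handed to TTM is converted to pages below.
 * A minimal, purely illustrative call for a 64KiB VRAM buffer might look
 * like:
 *
 *	ret = nouveau_bo_new(dev, NULL, 65536, 0, TTM_PL_FLAG_VRAM,
 *			     0, 0x0000, false, true, &nvbo);
 */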
int
nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
	       int size, int align, uint32_t flags, uint32_t tile_mode,
	       uint32_t tile_flags, bool no_vm, bool mappable,
	       struct nouveau_bo **pnvbo)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_bo *nvbo;
	int ret, n = 0;

	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
	if (!nvbo)
		return -ENOMEM;
	INIT_LIST_HEAD(&nvbo->head);
	INIT_LIST_HEAD(&nvbo->entry);
	nvbo->mappable = mappable;
	nvbo->no_vm = no_vm;
	nvbo->tile_mode = tile_mode;
	nvbo->tile_flags = tile_flags;

	/*
	 * Some of the tile_flags have a periodic structure of N*4096 bytes,
	 * align to that as well as the page size. Overallocate memory to
	 * avoid corruption of other buffer objects.
	 */
	switch (tile_flags) {
	case 0x1800:
	case 0x2800:
	case 0x4800:
	case 0x7a00:
		if (dev_priv->chipset >= 0xA0) {
			/* This is based on high end cards with 448 bits
			 * memory bus, could be different elsewhere.*/
			size += 6 * 28672;
			/* 8 * 28672 is the actual alignment requirement,
			 * but we must also align to page size. */
			align = 2 * 8 * 28672;
		} else if (dev_priv->chipset >= 0x90) {
			size += 3 * 16384;
			align = 12 * 16384;
		} else {
			size += 3 * 8192;
			/* 12 * 8192 is the actual alignment requirement,
			 * but we must also align to page size. */
			align = 2 * 12 * 8192;
		}
		break;
	default:
		break;
	}

	align >>= PAGE_SHIFT;

	size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
	if (dev_priv->card_type == NV_50) {
		size = (size + 65535) & ~65535;
		if (align < (65536 / PAGE_SIZE))
			align = (65536 / PAGE_SIZE);
	}

	if (flags & TTM_PL_FLAG_VRAM)
		nvbo->placements[n++] = TTM_PL_FLAG_VRAM | TTM_PL_MASK_CACHING;
	if (flags & TTM_PL_FLAG_TT)
		nvbo->placements[n++] = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	nvbo->placement.fpfn = 0;
	nvbo->placement.lpfn = mappable ? dev_priv->fb_mappable_pages : 0;
	nvbo->placement.placement = nvbo->placements;
	nvbo->placement.busy_placement = nvbo->placements;
	nvbo->placement.num_placement = n;
	nvbo->placement.num_busy_placement = n;

	nvbo->channel = chan;
	nouveau_bo_placement_set(nvbo, flags);
	ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
			  ttm_bo_type_device, &nvbo->placement, align, 0,
			  false, NULL, size, nouveau_bo_del_ttm);
	nvbo->channel = NULL;
	if (ret) {
		/* ttm will call nouveau_bo_del_ttm if it fails.. */
		return ret;
	}

	spin_lock(&dev_priv->ttm.bo_list_lock);
	list_add_tail(&nvbo->head, &dev_priv->ttm.bo_list);
	spin_unlock(&dev_priv->ttm.bo_list_lock);
	*pnvbo = nvbo;
	return 0;
}

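/* Rebuild the placement list from a mask of TTM_PL_FLAG_* memory types.
 * Pinned buffers keep TTM_PL_FLAG_NO_EVICT on every entry so they cannot
 * be moved by eviction.
 */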
void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t memtype)
{
	int n = 0;

	if (memtype & TTM_PL_FLAG_VRAM)
		nvbo->placements[n++] = TTM_PL_FLAG_VRAM | TTM_PL_MASK_CACHING;
	if (memtype & TTM_PL_FLAG_TT)
		nvbo->placements[n++] = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	if (memtype & TTM_PL_FLAG_SYSTEM)
		nvbo->placements[n++] = TTM_PL_FLAG_SYSTEM | TTM_PL_MASK_CACHING;
	nvbo->placement.placement = nvbo->placements;
	nvbo->placement.busy_placement = nvbo->placements;
	nvbo->placement.num_placement = n;
	nvbo->placement.num_busy_placement = n;

	if (nvbo->pin_refcnt) {
		while (n--)
			nvbo->placements[n] |= TTM_PL_FLAG_NO_EVICT;
	}
}

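/* Pin a buffer into the memory type(s) given by "memtype". Pinning is
 * refcounted; only the first pin validates the buffer and adjusts the
 * free aperture accounting.
 */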
int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret, i;

	if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
		NV_ERROR(nouveau_bdev(bo->bdev)->dev,
			 "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
			 1 << bo->mem.mem_type, memtype);
		return -EINVAL;
	}

	if (nvbo->pin_refcnt++)
		return 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		goto out;

	nouveau_bo_placement_set(nvbo, memtype);
	for (i = 0; i < nvbo->placement.num_placement; i++)
		nvbo->placements[i] |= TTM_PL_FLAG_NO_EVICT;

	ret = ttm_bo_validate(bo, &nvbo->placement, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			dev_priv->fb_aper_free -= bo->mem.size;
			break;
		case TTM_PL_TT:
			dev_priv->gart_info.aper_free -= bo->mem.size;
			break;
		default:
			break;
		}
	}
	ttm_bo_unreserve(bo);
out:
	if (unlikely(ret))
		nvbo->pin_refcnt--;
	return ret;
}

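/* Drop one pin reference. The buffer only becomes evictable again, and
 * the aperture accounting is only restored, once the last reference is
 * gone.
 */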
int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret, i;

	if (--nvbo->pin_refcnt)
		return 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		return ret;

	for (i = 0; i < nvbo->placement.num_placement; i++)
		nvbo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;

	ret = ttm_bo_validate(bo, &nvbo->placement, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			dev_priv->fb_aper_free += bo->mem.size;
			break;
		case TTM_PL_TT:
			dev_priv->gart_info.aper_free += bo->mem.size;
			break;
		default:
			break;
		}
	}

	ttm_bo_unreserve(bo);
	return ret;
}

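/* Map the whole buffer into kernel address space with ttm_bo_kmap().
 * The mapping is cached in nvbo->kmap and used by the rd/wr helpers
 * further down.
 */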
int
nouveau_bo_map(struct nouveau_bo *nvbo)
{
	int ret;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
	if (ret)
		return ret;

	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}

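/* Tear down the kernel mapping created by nouveau_bo_map(). */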
void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
	ttm_bo_kunmap(&nvbo->kmap);
}

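/* 16- and 32-bit accessors for a mapped buffer. "index" is in units of
 * the access size rather than bytes, and the buffer must currently be
 * mapped. Illustrative use:
 *
 *	if (nouveau_bo_map(nvbo) == 0) {
 *		nouveau_bo_wr32(nvbo, 0, 0x12345678);
 *		val = nouveau_bo_rd32(nvbo, 0);
 *		nouveau_bo_unmap(nvbo);
 *	}
 */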
u16
nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		return ioread16_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		iowrite16_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

u32
nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		return ioread32_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		iowrite32_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

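/* Create the TTM backend used for TT (GART) bindings: AGP when the GART
 * was set up over AGP, otherwise the driver's own SGDMA path.
 */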
static struct ttm_backend *
nouveau_bo_create_ttm_backend_entry(struct ttm_bo_device *bdev)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;

	switch (dev_priv->gart_info.type) {
#if __OS_HAS_AGP
	case NOUVEAU_GART_AGP:
		return ttm_agp_backend_init(bdev, dev->agp->bridge);
#endif
	case NOUVEAU_GART_SGDMA:
		return nouveau_sgdma_init_ttm(dev);
	default:
		NV_ERROR(dev, "Unknown GART type %d\n",
			 dev_priv->gart_info.type);
		break;
	}

	return NULL;
}

static int
nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	/* We'll do this from user space. */
	return 0;
}

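/* Describe each memory type (SYSTEM, VRAM, TT) to TTM: allowed caching
 * modes, aperture offset/size and the GPU virtual base used for it.
 */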
static int
nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			 struct ttm_mem_type_manager *man)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;

	switch (type) {
	case TTM_PL_SYSTEM:
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE |
			     TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
		man->available_caching = TTM_PL_FLAG_UNCACHED |
					 TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;

		man->io_addr = NULL;
		man->io_offset = drm_get_resource_start(dev, 1);
		man->io_size = drm_get_resource_len(dev, 1);
		if (man->io_size > nouveau_mem_fb_amount(dev))
			man->io_size = nouveau_mem_fb_amount(dev);

		man->gpu_offset = dev_priv->vm_vram_base;
		break;
	case TTM_PL_TT:
		switch (dev_priv->gart_info.type) {
		case NOUVEAU_GART_AGP:
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
				     TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
			man->available_caching = TTM_PL_FLAG_UNCACHED;
			man->default_caching = TTM_PL_FLAG_UNCACHED;
			break;
		case NOUVEAU_GART_SGDMA:
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
				     TTM_MEMTYPE_FLAG_CMA;
			man->available_caching = TTM_PL_MASK_CACHING;
			man->default_caching = TTM_PL_FLAG_CACHED;
			break;
		default:
			NV_ERROR(dev, "Unknown GART type: %d\n",
				 dev_priv->gart_info.type);
			return -EINVAL;
		}

		man->io_offset = dev_priv->gart_info.aper_base;
		man->io_size = dev_priv->gart_info.aper_size;
		man->io_addr = NULL;
		man->gpu_offset = dev_priv->vm_gart_base;
		break;
	default:
		NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}

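/* Eviction placement: VRAM buffers are allowed to go via TT as well as
 * system memory, everything else is evicted straight to system memory.
 */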
static void
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT |
					 TTM_PL_FLAG_SYSTEM);
		break;
	default:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM);
		break;
	}

	*pl = nvbo->placement;
}


/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
 * TTM_PL_{VRAM,TT} directly.
 */
static int
nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
			      struct nouveau_bo *nvbo, bool evict, bool no_wait,
			      struct ttm_mem_reg *new_mem)
{
	struct nouveau_fence *fence = NULL;
	int ret;

	ret = nouveau_fence_new(chan, &fence, true);
	if (ret)
		return ret;

	ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL,
					evict, no_wait, new_mem);
	nouveau_fence_unref((void *)&fence);
	return ret;
}

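/* Pick the DMA object handle that makes "mem" addressable from the given
 * channel; the device's own channel (dev_priv->channel) uses the
 * NvDmaVRAM/NvDmaGART objects, other channels use their own handles.
 */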
static inline uint32_t
nouveau_bo_mem_ctxdma(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
		      struct ttm_mem_reg *mem)
{
	if (chan == nouveau_bdev(nvbo->bo.bdev)->channel) {
		if (mem->mem_type == TTM_PL_TT)
			return NvDmaGART;
		return NvDmaVRAM;
	}

	if (mem->mem_type == TTM_PL_TT)
		return chan->gart_handle;
	return chan->vram_handle;
}

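/* Copy a buffer between placements with the M2MF engine, one page-sized
 * line at a time and at most 2047 lines per submission, then fence the
 * copy so TTM can safely release the old backing store.
 */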
static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, int no_wait,
		     struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_channel *chan;
	uint64_t src_offset, dst_offset;
	uint32_t page_count;
	int ret;

	chan = nvbo->channel;
	if (!chan || nvbo->tile_flags || nvbo->no_vm)
		chan = dev_priv->channel;

	src_offset = old_mem->mm_node->start << PAGE_SHIFT;
	dst_offset = new_mem->mm_node->start << PAGE_SHIFT;
	if (chan != dev_priv->channel) {
		if (old_mem->mem_type == TTM_PL_TT)
			src_offset += dev_priv->vm_gart_base;
		else
			src_offset += dev_priv->vm_vram_base;

		if (new_mem->mem_type == TTM_PL_TT)
			dst_offset += dev_priv->vm_gart_base;
		else
			dst_offset += dev_priv->vm_vram_base;
	}

	ret = RING_SPACE(chan, 3);
	if (ret)
		return ret;
	BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
	OUT_RING(chan, nouveau_bo_mem_ctxdma(nvbo, chan, old_mem));
	OUT_RING(chan, nouveau_bo_mem_ctxdma(nvbo, chan, new_mem));

	if (dev_priv->card_type >= NV_50) {
		ret = RING_SPACE(chan, 4);
		if (ret)
			return ret;
		BEGIN_RING(chan, NvSubM2MF, 0x0200, 1);
		OUT_RING(chan, 1);
		BEGIN_RING(chan, NvSubM2MF, 0x021c, 1);
		OUT_RING(chan, 1);
	}

	page_count = new_mem->num_pages;
	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		if (dev_priv->card_type >= NV_50) {
			ret = RING_SPACE(chan, 3);
			if (ret)
				return ret;
			BEGIN_RING(chan, NvSubM2MF, 0x0238, 2);
			OUT_RING(chan, upper_32_bits(src_offset));
			OUT_RING(chan, upper_32_bits(dst_offset));
		}
		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;
		BEGIN_RING(chan, NvSubM2MF,
			   NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
		OUT_RING(chan, lower_32_bits(src_offset));
		OUT_RING(chan, lower_32_bits(dst_offset));
		OUT_RING(chan, PAGE_SIZE); /* src_pitch */
		OUT_RING(chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING(chan, PAGE_SIZE); /* line_length */
		OUT_RING(chan, line_count);
		OUT_RING(chan, (1<<8)|(1<<0));
		OUT_RING(chan, 0);
		BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING(chan, 0);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait, new_mem);
}

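/* Accelerated move towards system memory: copy into a temporary TT
 * placement with M2MF first, then let TTM finish the move into the final
 * system placement.
 */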
static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait, struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait);
	if (ret)
		return ret;

	ret = ttm_tt_bind(bo->ttm, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, no_wait, &bo->mem, &tmp_mem);
	if (ret)
		goto out;

	ret = ttm_bo_move_ttm(bo, evict, no_wait, new_mem);
out:
	if (tmp_mem.mm_node) {
		spin_lock(&bo->bdev->glob->lru_lock);
		drm_mm_put_block(tmp_mem.mm_node);
		spin_unlock(&bo->bdev->glob->lru_lock);
	}

	return ret;
}

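/* Accelerated move out of system memory: bind the data into a temporary
 * TT placement first, then copy to the final placement with the M2MF
 * engine.
 */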
static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait, struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait);
	if (ret)
		return ret;

	ret = ttm_bo_move_ttm(bo, evict, no_wait, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, no_wait, &bo->mem, new_mem);
	if (ret)
		goto out;

out:
	if (tmp_mem.mm_node) {
		spin_lock(&bo->bdev->glob->lru_lock);
		drm_mm_put_block(tmp_mem.mm_node);
		spin_unlock(&bo->bdev->glob->lru_lock);
	}

	return ret;
}

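/* Main TTM move callback: set up the NV50 linear VM mapping for a new
 * VRAM location when needed, pick an accelerated path where possible and
 * fall back to ttm_bo_move_memcpy() when the copy engine cannot be used.
 */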
static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
		bool no_wait, struct ttm_mem_reg *new_mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct drm_device *dev = dev_priv->dev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	if (dev_priv->card_type == NV_50 && new_mem->mem_type == TTM_PL_VRAM &&
	    !nvbo->no_vm) {
		uint64_t offset = new_mem->mm_node->start << PAGE_SHIFT;

		ret = nv50_mem_vm_bind_linear(dev,
					      offset + dev_priv->vm_vram_base,
					      new_mem->size, nvbo->tile_flags,
					      offset);
		if (ret)
			return ret;
	}

	if (dev_priv->init_state != NOUVEAU_CARD_INIT_DONE ||
	    !dev_priv->channel)
		return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);

	if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
		BUG_ON(bo->mem.mm_node != NULL);
		bo->mem = *new_mem;
		new_mem->mm_node = NULL;
		return 0;
	}

	if (new_mem->mem_type == TTM_PL_SYSTEM) {
		if (old_mem->mem_type == TTM_PL_SYSTEM)
			return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
		if (nouveau_bo_move_flipd(bo, evict, intr, no_wait, new_mem))
			return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
	} else if (old_mem->mem_type == TTM_PL_SYSTEM) {
		if (nouveau_bo_move_flips(bo, evict, intr, no_wait, new_mem))
			return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
	} else {
		if (nouveau_bo_move_m2mf(bo, evict, no_wait, old_mem, new_mem))
			return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
	}

	return 0;
}

static int
nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	return 0;
}

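/* Hook the routines above into TTM; synchronisation objects are backed
 * by nouveau fences.
 */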
struct ttm_bo_driver nouveau_bo_driver = {
	.create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry,
	.invalidate_caches = nouveau_bo_invalidate_caches,
	.init_mem_type = nouveau_bo_init_mem_type,
	.evict_flags = nouveau_bo_evict_flags,
	.move = nouveau_bo_move,
	.verify_access = nouveau_bo_verify_access,
	.sync_obj_signaled = nouveau_fence_signalled,
	.sync_obj_wait = nouveau_fence_wait,
	.sync_obj_flush = nouveau_fence_flush,
	.sync_obj_unref = nouveau_fence_unref,
	.sync_obj_ref = nouveau_fence_ref,
};