/*
 * Copyright 2007 Dave Airlied
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *	    Ben Skeggs   <darktama@iinet.net.au>
 *	    Jeremy Kolb  <jkolb@brandeis.edu>
 */

#include "drmP.h"

#include "nouveau_drm.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"

#include <linux/log2.h>

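/*
 * TTM destroy callback: runs once the last reference to the buffer
 * object is gone.  Tears down the kernel mapping, releases any pending
 * tile region and drops the bo from the per-device bookkeeping list.
 */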
static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	ttm_bo_kunmap(&nvbo->kmap);

	if (unlikely(nvbo->gem))
		DRM_ERROR("bo %p still attached to GEM object\n", bo);

	if (nvbo->tile)
		nv10_mem_expire_tiling(dev, nvbo->tile, NULL);

	spin_lock(&dev_priv->ttm.bo_list_lock);
	list_del(&nvbo->head);
	spin_unlock(&dev_priv->ttm.bo_list_lock);
	kfree(nvbo);
}

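/*
 * Adjust the requested size/alignment to the tiling constraints of the
 * chipset: NV50 tiled formats are padded out to a multiple of a
 * VRAM-size-dependent block and given a 64KiB-compatible alignment,
 * while older chipsets round the size up to a multiple of the tile
 * pitch implied by tile_mode.
 */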
static void
nouveau_bo_fixup_align(struct drm_device *dev,
		       uint32_t tile_mode, uint32_t tile_flags,
		       int *align, int *size)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	/*
	 * Some of the tile_flags have a periodic structure of N*4096 bytes,
	 * align to that as well as to the page size.  Overallocate memory
	 * to avoid corruption of other buffer objects.
	 */
	if (dev_priv->card_type == NV_50) {
		uint32_t block_size = nouveau_mem_fb_amount(dev) >> 15;
		int i;

		switch (tile_flags) {
		case 0x1800:
		case 0x2800:
		case 0x4800:
		case 0x7a00:
			*size = roundup(*size, block_size);
			if (is_power_of_2(block_size)) {
				*size += 3 * block_size;
				for (i = 1; i < 10; i++) {
					*align = 12 * i * block_size;
					if (!(*align % 65536))
						break;
				}
			} else {
				*size += 6 * block_size;
				for (i = 1; i < 10; i++) {
					*align = 8 * i * block_size;
					if (!(*align % 65536))
						break;
				}
			}
			break;
		default:
			break;
		}

	} else {
		if (tile_mode) {
			if (dev_priv->chipset >= 0x40) {
				*align = 65536;
				*size = roundup(*size, 64 * tile_mode);

			} else if (dev_priv->chipset >= 0x30) {
				*align = 32768;
				*size = roundup(*size, 64 * tile_mode);

			} else if (dev_priv->chipset >= 0x20) {
				*align = 16384;
				*size = roundup(*size, 64 * tile_mode);

			} else if (dev_priv->chipset >= 0x10) {
				*align = 16384;
				*size = roundup(*size, 32 * tile_mode);
			}
		}
	}

	/* ALIGN works only on powers of two. */
	*size = roundup(*size, PAGE_SIZE);

	if (dev_priv->card_type == NV_50) {
		*size = roundup(*size, 65536);
		*align = max(65536, *align);
	}
}

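/*
 * Create and initialise a new buffer object.  "flags" takes the
 * TTM_PL_FLAG_* placement mask, tile_mode/tile_flags describe the
 * tiling layout, and no_vm/mappable control whether the bo gets a VM
 * mapping and whether it must stay inside the CPU-visible part of
 * VRAM.
 *
 * A minimal usage sketch (illustrative only; error handling and the
 * surrounding driver context are assumed):
 *
 *	struct nouveau_bo *nvbo = NULL;
 *	int ret;
 *
 *	ret = nouveau_bo_new(dev, NULL, PAGE_SIZE, 0, TTM_PL_FLAG_VRAM,
 *			     0, 0x0000, false, true, &nvbo);
 *	if (ret == 0)
 *		ret = nouveau_bo_map(nvbo);
 *	if (ret == 0)
 *		nouveau_bo_wr32(nvbo, 0, 0xdeadbeef);
 */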
int
nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
	       int size, int align, uint32_t flags, uint32_t tile_mode,
	       uint32_t tile_flags, bool no_vm, bool mappable,
	       struct nouveau_bo **pnvbo)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_bo *nvbo;
	int ret = 0;

	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
	if (!nvbo)
		return -ENOMEM;
	INIT_LIST_HEAD(&nvbo->head);
	INIT_LIST_HEAD(&nvbo->entry);
	nvbo->mappable = mappable;
	nvbo->no_vm = no_vm;
	nvbo->tile_mode = tile_mode;
	nvbo->tile_flags = tile_flags;

	nouveau_bo_fixup_align(dev, tile_mode, tile_flags, &align, &size);
	align >>= PAGE_SHIFT;

	nvbo->placement.fpfn = 0;
	nvbo->placement.lpfn = mappable ? dev_priv->fb_mappable_pages : 0;
	nouveau_bo_placement_set(nvbo, flags);

	nvbo->channel = chan;
	ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
			  ttm_bo_type_device, &nvbo->placement, align, 0,
			  false, NULL, size, nouveau_bo_del_ttm);
	nvbo->channel = NULL;
	if (ret) {
		/* TTM will call nouveau_bo_del_ttm if it fails. */
		return ret;
	}

	spin_lock(&dev_priv->ttm.bo_list_lock);
	list_add_tail(&nvbo->head, &dev_priv->ttm.bo_list);
	spin_unlock(&dev_priv->ttm.bo_list_lock);
	*pnvbo = nvbo;
	return 0;
}

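/*
 * Translate a TTM_PL_FLAG_* mask into the bo's placement list; any
 * domain may be used with any caching mode.  Pinned buffers keep their
 * NO_EVICT flag across placement changes.
 */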
void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t memtype)
{
	int n = 0;

	if (memtype & TTM_PL_FLAG_VRAM)
		nvbo->placements[n++] = TTM_PL_FLAG_VRAM | TTM_PL_MASK_CACHING;
	if (memtype & TTM_PL_FLAG_TT)
		nvbo->placements[n++] = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	if (memtype & TTM_PL_FLAG_SYSTEM)
		nvbo->placements[n++] = TTM_PL_FLAG_SYSTEM | TTM_PL_MASK_CACHING;
	nvbo->placement.placement = nvbo->placements;
	nvbo->placement.busy_placement = nvbo->placements;
	nvbo->placement.num_placement = n;
	nvbo->placement.num_busy_placement = n;

	if (nvbo->pin_refcnt) {
		while (n--)
			nvbo->placements[n] |= TTM_PL_FLAG_NO_EVICT;
	}
}

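/*
 * Pin the buffer into the memory domain given by memtype.  Pinning is
 * refcounted; a pin request for a different domain than the one the bo
 * already lives in is refused.
 */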
int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret, i;

	if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
		NV_ERROR(nouveau_bdev(bo->bdev)->dev,
			 "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
			 1 << bo->mem.mem_type, memtype);
		return -EINVAL;
	}

	if (nvbo->pin_refcnt++)
		return 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		goto out;

	nouveau_bo_placement_set(nvbo, memtype);
	for (i = 0; i < nvbo->placement.num_placement; i++)
		nvbo->placements[i] |= TTM_PL_FLAG_NO_EVICT;

	ret = ttm_bo_validate(bo, &nvbo->placement, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			dev_priv->fb_aper_free -= bo->mem.size;
			break;
		case TTM_PL_TT:
			dev_priv->gart_info.aper_free -= bo->mem.size;
			break;
		default:
			break;
		}
	}
	ttm_bo_unreserve(bo);
out:
	if (unlikely(ret))
		nvbo->pin_refcnt--;
	return ret;
}

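/*
 * Drop one pin reference; on the last unpin the NO_EVICT flags are
 * cleared and the bo revalidated so it becomes evictable again, and
 * the aperture free-space accounting is restored.
 */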
int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret, i;

	if (--nvbo->pin_refcnt)
		return 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		return ret;

	for (i = 0; i < nvbo->placement.num_placement; i++)
		nvbo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;

	ret = ttm_bo_validate(bo, &nvbo->placement, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			dev_priv->fb_aper_free += bo->mem.size;
			break;
		case TTM_PL_TT:
			dev_priv->gart_info.aper_free += bo->mem.size;
			break;
		default:
			break;
		}
	}

	ttm_bo_unreserve(bo);
	return ret;
}

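/*
 * Kernel CPU mapping of the whole buffer, cached in nvbo->kmap and
 * used by the nouveau_bo_rd/wr accessors below.
 */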
int
nouveau_bo_map(struct nouveau_bo *nvbo)
{
	int ret;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
	if (ret)
		return ret;

	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}

void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
	ttm_bo_kunmap(&nvbo->kmap);
}

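/*
 * CPU accessors for a mapped bo.  ttm_kmap_obj_virtual() reports
 * whether the mapping is I/O memory (e.g. write-combined VRAM) or
 * plain system memory, so the right access primitive can be used.
 */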
u16
nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		return ioread16_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		iowrite16_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

u32
nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		return ioread32_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		iowrite32_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

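/*
 * Pick a TTM backend for TTM_PL_TT placements, matching however the
 * GART was set up: either real AGP, or the chipset's scatter/gather
 * DMA object.
 */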
static struct ttm_backend *
nouveau_bo_create_ttm_backend_entry(struct ttm_bo_device *bdev)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;

	switch (dev_priv->gart_info.type) {
#if __OS_HAS_AGP
	case NOUVEAU_GART_AGP:
		return ttm_agp_backend_init(bdev, dev->agp->bridge);
#endif
	case NOUVEAU_GART_SGDMA:
		return nouveau_sgdma_init_ttm(dev);
	default:
		NV_ERROR(dev, "Unknown GART type %d\n",
			 dev_priv->gart_info.type);
		break;
	}

	return NULL;
}

static int
nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	/* We'll do this from user space. */
	return 0;
}

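/*
 * Describe the memory domains to TTM.  VRAM is a fixed, mappable
 * aperture behind PCI BAR1 (clamped to the actual amount of VRAM);
 * TT lives behind the AGP or SGDMA aperture.
 */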
static int
nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			 struct ttm_mem_type_manager *man)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;

	switch (type) {
	case TTM_PL_SYSTEM:
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE |
			     TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
		man->available_caching = TTM_PL_FLAG_UNCACHED |
					 TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;

		man->io_addr = NULL;
		man->io_offset = drm_get_resource_start(dev, 1);
		man->io_size = drm_get_resource_len(dev, 1);
		if (man->io_size > nouveau_mem_fb_amount(dev))
			man->io_size = nouveau_mem_fb_amount(dev);

		man->gpu_offset = dev_priv->vm_vram_base;
		break;
	case TTM_PL_TT:
		switch (dev_priv->gart_info.type) {
		case NOUVEAU_GART_AGP:
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
				     TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
			man->available_caching = TTM_PL_FLAG_UNCACHED;
			man->default_caching = TTM_PL_FLAG_UNCACHED;
			break;
		case NOUVEAU_GART_SGDMA:
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
				     TTM_MEMTYPE_FLAG_CMA;
			man->available_caching = TTM_PL_MASK_CACHING;
			man->default_caching = TTM_PL_FLAG_CACHED;
			break;
		default:
			NV_ERROR(dev, "Unknown GART type: %d\n",
				 dev_priv->gart_info.type);
			return -EINVAL;
		}

		man->io_offset = dev_priv->gart_info.aper_base;
		man->io_size = dev_priv->gart_info.aper_size;
		man->io_addr = NULL;
		man->gpu_offset = dev_priv->vm_gart_base;
		break;
	default:
		NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}

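/*
 * On eviction, VRAM buffers may move to GART or system memory;
 * anything else goes straight to system memory.
 */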
static void
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT |
					 TTM_PL_FLAG_SYSTEM);
		break;
	default:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM);
		break;
	}

	*pl = nvbo->placement;
}

/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, which can access
 * TTM_PL_{VRAM,TT} directly.
 */

static int
nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
			      struct nouveau_bo *nvbo, bool evict, bool no_wait,
			      struct ttm_mem_reg *new_mem)
{
	struct nouveau_fence *fence = NULL;
	int ret;

	ret = nouveau_fence_new(chan, &fence, true);
	if (ret)
		return ret;

	ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL,
					evict, no_wait, new_mem);
	if (nvbo->channel && nvbo->channel != chan)
		ret = nouveau_fence_wait(fence, NULL, false, false);
	nouveau_fence_unref((void *)&fence);
	return ret;
}

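/*
 * Return the DMA object handle through which "chan" can address the
 * given memory region: the kernel channel uses its static VRAM/GART
 * ctxdmas, other channels their own per-channel handles.
 */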
static inline uint32_t
nouveau_bo_mem_ctxdma(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
		      struct ttm_mem_reg *mem)
{
	if (chan == nouveau_bdev(nvbo->bo.bdev)->channel) {
		if (mem->mem_type == TTM_PL_TT)
			return NvDmaGART;
		return NvDmaVRAM;
	}

	if (mem->mem_type == TTM_PL_TT)
		return chan->gart_handle;
	return chan->vram_handle;
}

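/*
 * Copy a buffer with the M2MF engine, treating it as a linear blob of
 * PAGE_SIZE-wide lines and transferring at most 2047 lines per pass
 * (apparently the method's line-count limit).  On NV50 the writes to
 * methods 0x0200/0x021c presumably select a linear source/destination
 * layout, and 0x0238 supplies the upper 32 bits of the VM offsets.
 * Tiled or VM-less buffers are copied via the kernel channel rather
 * than the bo's own channel.
 */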
static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
		     int no_wait, struct ttm_mem_reg *new_mem)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct nouveau_channel *chan;
	uint64_t src_offset, dst_offset;
	uint32_t page_count;
	int ret;

	chan = nvbo->channel;
	if (!chan || nvbo->tile_flags || nvbo->no_vm)
		chan = dev_priv->channel;

	src_offset = old_mem->mm_node->start << PAGE_SHIFT;
	dst_offset = new_mem->mm_node->start << PAGE_SHIFT;
	if (chan != dev_priv->channel) {
		if (old_mem->mem_type == TTM_PL_TT)
			src_offset += dev_priv->vm_gart_base;
		else
			src_offset += dev_priv->vm_vram_base;

		if (new_mem->mem_type == TTM_PL_TT)
			dst_offset += dev_priv->vm_gart_base;
		else
			dst_offset += dev_priv->vm_vram_base;
	}

	ret = RING_SPACE(chan, 3);
	if (ret)
		return ret;
	BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
	OUT_RING(chan, nouveau_bo_mem_ctxdma(nvbo, chan, old_mem));
	OUT_RING(chan, nouveau_bo_mem_ctxdma(nvbo, chan, new_mem));

	if (dev_priv->card_type >= NV_50) {
		ret = RING_SPACE(chan, 4);
		if (ret)
			return ret;
		BEGIN_RING(chan, NvSubM2MF, 0x0200, 1);
		OUT_RING(chan, 1);
		BEGIN_RING(chan, NvSubM2MF, 0x021c, 1);
		OUT_RING(chan, 1);
	}

	page_count = new_mem->num_pages;
	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		if (dev_priv->card_type >= NV_50) {
			ret = RING_SPACE(chan, 3);
			if (ret)
				return ret;
			BEGIN_RING(chan, NvSubM2MF, 0x0238, 2);
			OUT_RING(chan, upper_32_bits(src_offset));
			OUT_RING(chan, upper_32_bits(dst_offset));
		}
		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;
		BEGIN_RING(chan, NvSubM2MF,
			   NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
		OUT_RING(chan, lower_32_bits(src_offset));
		OUT_RING(chan, lower_32_bits(dst_offset));
		OUT_RING(chan, PAGE_SIZE); /* src_pitch */
		OUT_RING(chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING(chan, PAGE_SIZE); /* line_length */
		OUT_RING(chan, line_count);
		OUT_RING(chan, (1<<8)|(1<<0));
		OUT_RING(chan, 0);
		BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING(chan, 0);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait, new_mem);
}

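/*
 * VRAM -> system moves can't be done by M2MF directly, so bounce
 * through a GART placement: copy into a temporary TT region with the
 * hardware, then let TTM move that to the final system placement.
 */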
static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait, struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait);
	if (ret)
		return ret;

	ret = ttm_tt_bind(bo->ttm, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait, &tmp_mem);
	if (ret)
		goto out;

	ret = ttm_bo_move_ttm(bo, evict, no_wait, new_mem);
out:
	if (tmp_mem.mm_node) {
		spin_lock(&bo->bdev->glob->lru_lock);
		drm_mm_put_block(tmp_mem.mm_node);
		spin_unlock(&bo->bdev->glob->lru_lock);
	}

	return ret;
}

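/*
 * The reverse direction: system -> VRAM first moves the pages into
 * GART with TTM, then copies from there into VRAM with M2MF.
 */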
static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait, struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait);
	if (ret)
		return ret;

	ret = ttm_bo_move_ttm(bo, evict, no_wait, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait, new_mem);
	if (ret)
		goto out;

out:
	if (tmp_mem.mm_node) {
		spin_lock(&bo->bdev->glob->lru_lock);
		drm_mm_put_block(tmp_mem.mm_node);
		spin_unlock(&bo->bdev->glob->lru_lock);
	}

	return ret;
}

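/*
 * Set up tiling/VM state for a new VRAM placement before the move:
 * on NV50 the linear VM mapping is (re)bound with the bo's tile_flags,
 * on NV10+ a tile region covering the new offset is allocated.
 */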
static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
		   struct nouveau_tile_reg **new_tile)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	uint64_t offset;
	int ret;

	if (nvbo->no_vm || new_mem->mem_type != TTM_PL_VRAM) {
		/* Nothing to do. */
		*new_tile = NULL;
		return 0;
	}

	offset = new_mem->mm_node->start << PAGE_SHIFT;

	if (dev_priv->card_type == NV_50) {
		ret = nv50_mem_vm_bind_linear(dev,
					      offset + dev_priv->vm_vram_base,
					      new_mem->size, nvbo->tile_flags,
					      offset);
		if (ret)
			return ret;

	} else if (dev_priv->card_type >= NV_10) {
		*new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size,
						nvbo->tile_mode);
	}

	return 0;
}

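/*
 * Retire the old tile region once the move has completed; expiry is
 * deferred against the bo's fence so in-flight rendering to the old
 * tiling layout finishes first.
 */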
static void
nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
		      struct nouveau_tile_reg *new_tile,
		      struct nouveau_tile_reg **old_tile)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;

	if (dev_priv->card_type >= NV_10 &&
	    dev_priv->card_type < NV_50) {
		if (*old_tile)
			nv10_mem_expire_tiling(dev, *old_tile, bo->sync_obj);

		*old_tile = new_tile;
	}
}

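/*
 * Main TTM move callback.  Prefers a hardware copy on the GPU and
 * falls back to ttm_bo_move_memcpy() when the card isn't initialised
 * yet or the accelerated paths fail.
 */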
static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
		bool no_wait, struct ttm_mem_reg *new_mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct nouveau_tile_reg *new_tile = NULL;
	int ret = 0;

	ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
	if (ret)
		return ret;

	/* Software copy if the card isn't up and running yet. */
	if (dev_priv->init_state != NOUVEAU_CARD_INIT_DONE ||
	    !dev_priv->channel) {
		ret = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
		goto out;
	}

	/* Fake bo copy. */
	if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
		BUG_ON(bo->mem.mm_node != NULL);
		bo->mem = *new_mem;
		new_mem->mm_node = NULL;
		goto out;
	}

	/* Hardware assisted copy. */
	if (new_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait, new_mem);
	else if (old_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flips(bo, evict, intr, no_wait, new_mem);
	else
		ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait, new_mem);

	if (!ret)
		goto out;

	/* Fallback to software copy. */
	ret = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);

out:
	if (ret)
		nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
	else
		nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);

	return ret;
}

static int
nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	return 0;
}

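/*
 * Hook everything up for TTM; fences double as TTM sync objects.
 */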
struct ttm_bo_driver nouveau_bo_driver = {
	.create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry,
	.invalidate_caches = nouveau_bo_invalidate_caches,
	.init_mem_type = nouveau_bo_init_mem_type,
	.evict_flags = nouveau_bo_evict_flags,
	.move = nouveau_bo_move,
	.verify_access = nouveau_bo_verify_access,
	.sync_obj_signaled = nouveau_fence_signalled,
	.sync_obj_wait = nouveau_fence_wait,
	.sync_obj_flush = nouveau_fence_flush,
	.sync_obj_unref = nouveau_fence_unref,
	.sync_obj_ref = nouveau_fence_ref,
};
