/*
 * Copyright 2007 Dave Airlied
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *          Ben Skeggs   <darktama@iinet.net.au>
 *          Jeremy Kolb  <jkolb@brandeis.edu>
 */

#include "drmP.h"

#include "nouveau_drm.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_mm.h"
#include "nouveau_vm.h"

#include <linux/log2.h>
#include <linux/slab.h>

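/*
 * TTM destroy callback, invoked once the buffer object's last reference
 * is dropped.  Releases any pre-NV50 tile region and NV50+ VM mapping
 * before freeing the object itself.
 */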
static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct drm_device *dev = dev_priv->dev;
        struct nouveau_bo *nvbo = nouveau_bo(bo);

        if (unlikely(nvbo->gem))
                DRM_ERROR("bo %p still attached to GEM object\n", bo);

        nv10_mem_put_tile_region(dev, nvbo->tile, NULL);
        nouveau_vm_put(&nvbo->vma);
        kfree(nvbo);
}

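/*
 * Round size/alignment up to what the target chipset requires: pre-NV50
 * tiled buffers need chipset-specific alignment and tile-pitch rounding,
 * while NV50+ selects a small or large page shift and rounds the
 * allocation to that page size.
 */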
static void
nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
                       int *align, int *size, int *page_shift)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);

        if (dev_priv->card_type < NV_50) {
                if (nvbo->tile_mode) {
                        if (dev_priv->chipset >= 0x40) {
                                *align = 65536;
                                *size = roundup(*size, 64 * nvbo->tile_mode);

                        } else if (dev_priv->chipset >= 0x30) {
                                *align = 32768;
                                *size = roundup(*size, 64 * nvbo->tile_mode);

                        } else if (dev_priv->chipset >= 0x20) {
                                *align = 16384;
                                *size = roundup(*size, 64 * nvbo->tile_mode);

                        } else if (dev_priv->chipset >= 0x10) {
                                *align = 16384;
                                *size = roundup(*size, 32 * nvbo->tile_mode);
                        }
                }
        } else {
                if (likely(dev_priv->chan_vm)) {
                        if (!(flags & TTM_PL_FLAG_TT) && *size > 256 * 1024)
                                *page_shift = dev_priv->chan_vm->lpg_shift;
                        else
                                *page_shift = dev_priv->chan_vm->spg_shift;
                } else {
                        *page_shift = 12;
                }

                *size = roundup(*size, (1 << *page_shift));
                *align = max((1 << *page_shift), *align);
        }

        *size = roundup(*size, PAGE_SIZE);
}

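/*
 * Allocate and initialise a new buffer object.  On NV50+ a VM mapping is
 * reserved up-front; if ttm_bo_init() fails it invokes
 * nouveau_bo_del_ttm() itself, so the caller must not free nvbo again.
 *
 * A minimal usage sketch (hypothetical caller, error handling elided):
 *
 *      struct nouveau_bo *nvbo = NULL;
 *      int ret = nouveau_bo_new(dev, NULL, 64 * 1024, 0,
 *                               TTM_PL_FLAG_VRAM, 0, 0, &nvbo);
 *      if (ret == 0)
 *              ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM);
 */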
int
nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
               int size, int align, uint32_t flags, uint32_t tile_mode,
               uint32_t tile_flags, struct nouveau_bo **pnvbo)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_bo *nvbo;
        int ret = 0, page_shift = 0;

        nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
        if (!nvbo)
                return -ENOMEM;
        INIT_LIST_HEAD(&nvbo->head);
        INIT_LIST_HEAD(&nvbo->entry);
        nvbo->tile_mode = tile_mode;
        nvbo->tile_flags = tile_flags;
        nvbo->bo.bdev = &dev_priv->ttm.bdev;

        nouveau_bo_fixup_align(nvbo, flags, &align, &size, &page_shift);
        align >>= PAGE_SHIFT;

        if (dev_priv->chan_vm) {
                ret = nouveau_vm_get(dev_priv->chan_vm, size, page_shift,
                                     NV_MEM_ACCESS_RW, &nvbo->vma);
                if (ret) {
                        kfree(nvbo);
                        return ret;
                }
        }

        nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
        nouveau_bo_placement_set(nvbo, flags, 0);

        nvbo->channel = chan;
        ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
                          ttm_bo_type_device, &nvbo->placement, align, 0,
                          false, NULL, size, nouveau_bo_del_ttm);
        if (ret) {
                /* ttm will call nouveau_bo_del_ttm if it fails.. */
                return ret;
        }
        nvbo->channel = NULL;

        if (nvbo->vma.node) {
                if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
                        nvbo->bo.offset = nvbo->vma.offset;
        }

        *pnvbo = nvbo;
        return 0;
}

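/*
 * Build a TTM placement list from a mask of TTM_PL_FLAG_* domains,
 * preserving the supplied caching/eviction flags on every entry.
 */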
static void
set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags)
{
        *n = 0;

        if (type & TTM_PL_FLAG_VRAM)
                pl[(*n)++] = TTM_PL_FLAG_VRAM | flags;
        if (type & TTM_PL_FLAG_TT)
                pl[(*n)++] = TTM_PL_FLAG_TT | flags;
        if (type & TTM_PL_FLAG_SYSTEM)
                pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags;
}

static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
        int vram_pages = dev_priv->vram_size >> PAGE_SHIFT;

        if (dev_priv->card_type == NV_10 &&
            nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
            nvbo->bo.mem.num_pages < vram_pages / 2) {
                /*
                 * Make sure that the color and depth buffers are handled
                 * by independent memory controller units. Up to a 9x
                 * speed up when alpha-blending and depth-test are enabled
                 * at the same time.
                 */
                if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
                        nvbo->placement.fpfn = vram_pages / 2;
                        nvbo->placement.lpfn = ~0;
                } else {
                        nvbo->placement.fpfn = 0;
                        nvbo->placement.lpfn = vram_pages / 2;
                }
        }
}

void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
{
        struct ttm_placement *pl = &nvbo->placement;
        uint32_t flags = TTM_PL_MASK_CACHING |
                (nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);

        pl->placement = nvbo->placements;
        set_placement_list(nvbo->placements, &pl->num_placement,
                           type, flags);

        pl->busy_placement = nvbo->busy_placements;
        set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
                           type | busy, flags);

        set_placement_range(nvbo, type);
}

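/*
 * Pin a BO into the given memory type.  Pinning is reference counted:
 * only the first pin validates the buffer (with TTM_PL_FLAG_NO_EVICT
 * set via the placement flags) and adjusts the aperture accounting.
 */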
int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
        struct ttm_buffer_object *bo = &nvbo->bo;
        int ret;

        if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
                NV_ERROR(nouveau_bdev(bo->bdev)->dev,
                         "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
                         1 << bo->mem.mem_type, memtype);
                return -EINVAL;
        }

        if (nvbo->pin_refcnt++)
                return 0;

        ret = ttm_bo_reserve(bo, false, false, false, 0);
        if (ret)
                goto out;

        nouveau_bo_placement_set(nvbo, memtype, 0);

        ret = nouveau_bo_validate(nvbo, false, false, false);
        if (ret == 0) {
                switch (bo->mem.mem_type) {
                case TTM_PL_VRAM:
                        dev_priv->fb_aper_free -= bo->mem.size;
                        break;
                case TTM_PL_TT:
                        dev_priv->gart_info.aper_free -= bo->mem.size;
                        break;
                default:
                        break;
                }
        }
        ttm_bo_unreserve(bo);
out:
        if (unlikely(ret))
                nvbo->pin_refcnt--;
        return ret;
}

int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
        struct ttm_buffer_object *bo = &nvbo->bo;
        int ret;

        if (--nvbo->pin_refcnt)
                return 0;

        ret = ttm_bo_reserve(bo, false, false, false, 0);
        if (ret)
                return ret;

        nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);

        ret = nouveau_bo_validate(nvbo, false, false, false);
        if (ret == 0) {
                switch (bo->mem.mem_type) {
                case TTM_PL_VRAM:
                        dev_priv->fb_aper_free += bo->mem.size;
                        break;
                case TTM_PL_TT:
                        dev_priv->gart_info.aper_free += bo->mem.size;
                        break;
                default:
                        break;
                }
        }

        ttm_bo_unreserve(bo);
        return ret;
}

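/*
 * Map the whole BO into kernel virtual address space via ttm_bo_kmap().
 * The mapping is cached in nvbo->kmap for the rd/wr accessors below.
 */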
int
nouveau_bo_map(struct nouveau_bo *nvbo)
{
        int ret;

        ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
        if (ret)
                return ret;

        ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
        ttm_bo_unreserve(&nvbo->bo);
        return ret;
}

void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
        if (nvbo)
                ttm_bo_kunmap(&nvbo->kmap);
}

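/*
 * Validate the BO against its current placement list and, on NV50+,
 * refresh bo.offset from the VM address when the buffer sits in VRAM.
 */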
int
nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
                    bool no_wait_reserve, bool no_wait_gpu)
{
        int ret;

        ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, interruptible,
                              no_wait_reserve, no_wait_gpu);
        if (ret)
                return ret;

        if (nvbo->vma.node) {
                if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
                        nvbo->bo.offset = nvbo->vma.offset;
        }

        return 0;
}

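/*
 * 16/32-bit accessors into a kmapped BO.  ttm_kmap_obj_virtual() reports
 * whether the mapping is I/O memory, in which case the MMIO accessors
 * must be used instead of plain loads and stores.
 */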
u16
nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
{
        bool is_iomem;
        u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
        mem = &mem[index];
        if (is_iomem)
                return ioread16_native((void __force __iomem *)mem);
        else
                return *mem;
}

void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
        bool is_iomem;
        u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
        mem = &mem[index];
        if (is_iomem)
                iowrite16_native(val, (void __force __iomem *)mem);
        else
                *mem = val;
}

u32
nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
{
        bool is_iomem;
        u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
        mem = &mem[index];
        if (is_iomem)
                return ioread32_native((void __force __iomem *)mem);
        else
                return *mem;
}

void
nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
{
        bool is_iomem;
        u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
        mem = &mem[index];
        if (is_iomem)
                iowrite32_native(val, (void __force __iomem *)mem);
        else
                *mem = val;
}

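/* Select a TTM backend for the current GART type (AGP or sgdma). */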
static struct ttm_backend *
nouveau_bo_create_ttm_backend_entry(struct ttm_bo_device *bdev)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
        struct drm_device *dev = dev_priv->dev;

        switch (dev_priv->gart_info.type) {
#if __OS_HAS_AGP
        case NOUVEAU_GART_AGP:
                return ttm_agp_backend_init(bdev, dev->agp->bridge);
#endif
        case NOUVEAU_GART_PDMA:
        case NOUVEAU_GART_HW:
                return nouveau_sgdma_init_ttm(dev);
        default:
                NV_ERROR(dev, "Unknown GART type %d\n",
                         dev_priv->gart_info.type);
                break;
        }

        return NULL;
}

static int
nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
        /* We'll do this from user space. */
        return 0;
}

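/*
 * Describe each TTM memory type to the core: VRAM uses the nouveau VRAM
 * manager on NV50+ (io-reserve fast path disabled so BAR windows can be
 * recycled), while TT is backed by AGP or page-table DMA depending on
 * the GART type.
 */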
static int
nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                         struct ttm_mem_type_manager *man)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
        struct drm_device *dev = dev_priv->dev;

        switch (type) {
        case TTM_PL_SYSTEM:
                man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_MASK_CACHING;
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        case TTM_PL_VRAM:
                if (dev_priv->card_type >= NV_50) {
                        man->func = &nouveau_vram_manager;
                        man->io_reserve_fastpath = false;
                        man->use_io_reserve_lru = true;
                } else {
                        man->func = &ttm_bo_manager_func;
                }
                man->flags = TTM_MEMTYPE_FLAG_FIXED |
                             TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_FLAG_UNCACHED |
                                         TTM_PL_FLAG_WC;
                man->default_caching = TTM_PL_FLAG_WC;
                break;
        case TTM_PL_TT:
                man->func = &ttm_bo_manager_func;
                switch (dev_priv->gart_info.type) {
                case NOUVEAU_GART_AGP:
                        man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
                        man->available_caching = TTM_PL_FLAG_UNCACHED |
                                TTM_PL_FLAG_WC;
                        man->default_caching = TTM_PL_FLAG_WC;
                        break;
                case NOUVEAU_GART_PDMA:
                case NOUVEAU_GART_HW:
                        man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
                                     TTM_MEMTYPE_FLAG_CMA;
                        man->available_caching = TTM_PL_MASK_CACHING;
                        man->default_caching = TTM_PL_FLAG_CACHED;
                        man->gpu_offset = dev_priv->gart_info.aper_base;
                        break;
                default:
                        NV_ERROR(dev, "Unknown GART type: %d\n",
                                 dev_priv->gart_info.type);
                        return -EINVAL;
                }
                break;
        default:
                NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type);
                return -EINVAL;
        }
        return 0;
}

static void
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
        struct nouveau_bo *nvbo = nouveau_bo(bo);

        switch (bo->mem.mem_type) {
        case TTM_PL_VRAM:
                nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
                                         TTM_PL_FLAG_SYSTEM);
                break;
        default:
                nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
                break;
        }

        *pl = nvbo->placement;
}

/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
 * TTM_PL_{VRAM,TT} directly.
 */

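/*
 * Emit a fence on the copy channel and hand it to TTM so the old
 * backing store is only released once the blit has completed.
 */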
static int
nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
                              struct nouveau_bo *nvbo, bool evict,
                              bool no_wait_reserve, bool no_wait_gpu,
                              struct ttm_mem_reg *new_mem)
{
        struct nouveau_fence *fence = NULL;
        int ret;

        ret = nouveau_fence_new(chan, &fence, true);
        if (ret)
                return ret;

        ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, evict,
                                        no_wait_reserve, no_wait_gpu, new_mem);
        nouveau_fence_unref(&fence);
        return ret;
}

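/*
 * Fermi (NVC0) M2MF copy: linear source/destination addresses come from
 * the BO's VM mapping for VRAM, or the GART aperture otherwise.  The
 * copy is emitted one page per line, at most 2047 lines per method.
 */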
static int
nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        u32 page_count = new_mem->num_pages;
        u64 src_offset, dst_offset;
        int ret;

        src_offset = old_mem->start << PAGE_SHIFT;
        if (old_mem->mem_type == TTM_PL_VRAM)
                src_offset = nvbo->vma.offset;
        else
                src_offset += dev_priv->gart_info.aper_base;

        dst_offset = new_mem->start << PAGE_SHIFT;
        if (new_mem->mem_type == TTM_PL_VRAM)
                dst_offset = nvbo->vma.offset;
        else
                dst_offset += dev_priv->gart_info.aper_base;

        while (page_count) {
                int line_count = (page_count > 2047) ? 2047 : page_count;

                ret = RING_SPACE(chan, 12);
                if (ret)
                        return ret;

                BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0238, 2);
                OUT_RING  (chan, upper_32_bits(dst_offset));
                OUT_RING  (chan, lower_32_bits(dst_offset));
                BEGIN_NVC0(chan, 2, NvSubM2MF, 0x030c, 6);
                OUT_RING  (chan, upper_32_bits(src_offset));
                OUT_RING  (chan, lower_32_bits(src_offset));
                OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
                OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
                OUT_RING  (chan, PAGE_SIZE); /* line_length */
                OUT_RING  (chan, line_count);
                BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0300, 1);
                OUT_RING  (chan, 0x00100110);

                page_count -= line_count;
                src_offset += (PAGE_SIZE * line_count);
                dst_offset += (PAGE_SIZE * line_count);
        }

        return 0;
}

static int
nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        u64 length = (new_mem->num_pages << PAGE_SHIFT);
        u64 src_offset, dst_offset;
        int ret;

        src_offset = old_mem->start << PAGE_SHIFT;
        if (old_mem->mem_type == TTM_PL_VRAM)
                src_offset = nvbo->vma.offset;
        else
                src_offset += dev_priv->gart_info.aper_base;

        dst_offset = new_mem->start << PAGE_SHIFT;
        if (new_mem->mem_type == TTM_PL_VRAM)
                dst_offset = nvbo->vma.offset;
        else
                dst_offset += dev_priv->gart_info.aper_base;

        while (length) {
                u32 amount, stride, height;

                amount = min(length, (u64)(4 * 1024 * 1024));
                stride = 16 * 4;
                height = amount / stride;

                if (new_mem->mem_type == TTM_PL_VRAM &&
                    nouveau_bo_tile_layout(nvbo)) {
                        ret = RING_SPACE(chan, 8);
                        if (ret)
                                return ret;

                        BEGIN_RING(chan, NvSubM2MF, 0x0200, 7);
                        OUT_RING  (chan, 0);
                        OUT_RING  (chan, 0);
                        OUT_RING  (chan, stride);
                        OUT_RING  (chan, height);
                        OUT_RING  (chan, 1);
                        OUT_RING  (chan, 0);
                        OUT_RING  (chan, 0);
                } else {
                        ret = RING_SPACE(chan, 2);
                        if (ret)
                                return ret;

                        BEGIN_RING(chan, NvSubM2MF, 0x0200, 1);
                        OUT_RING  (chan, 1);
                }
                if (old_mem->mem_type == TTM_PL_VRAM &&
                    nouveau_bo_tile_layout(nvbo)) {
                        ret = RING_SPACE(chan, 8);
                        if (ret)
                                return ret;

                        BEGIN_RING(chan, NvSubM2MF, 0x021c, 7);
                        OUT_RING  (chan, 0);
                        OUT_RING  (chan, 0);
                        OUT_RING  (chan, stride);
                        OUT_RING  (chan, height);
                        OUT_RING  (chan, 1);
                        OUT_RING  (chan, 0);
                        OUT_RING  (chan, 0);
                } else {
                        ret = RING_SPACE(chan, 2);
                        if (ret)
                                return ret;

                        BEGIN_RING(chan, NvSubM2MF, 0x021c, 1);
                        OUT_RING  (chan, 1);
                }

                ret = RING_SPACE(chan, 14);
                if (ret)
                        return ret;

                BEGIN_RING(chan, NvSubM2MF, 0x0238, 2);
                OUT_RING  (chan, upper_32_bits(src_offset));
                OUT_RING  (chan, upper_32_bits(dst_offset));
                BEGIN_RING(chan, NvSubM2MF, 0x030c, 8);
                OUT_RING  (chan, lower_32_bits(src_offset));
                OUT_RING  (chan, lower_32_bits(dst_offset));
                OUT_RING  (chan, stride);
                OUT_RING  (chan, stride);
                OUT_RING  (chan, stride);
                OUT_RING  (chan, height);
                OUT_RING  (chan, 0x00000101);
                OUT_RING  (chan, 0x00000000);
                BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
                OUT_RING  (chan, 0);

                length -= amount;
                src_offset += amount;
                dst_offset += amount;
        }

        return 0;
}

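/*
 * Pick the DMA object handle (ctxdma) covering the aperture a memory
 * region lives in: the GART object for TT, the VRAM object otherwise.
 */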
static inline uint32_t
nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
                      struct nouveau_channel *chan, struct ttm_mem_reg *mem)
{
        if (mem->mem_type == TTM_PL_TT)
                return chan->gart_handle;
        return chan->vram_handle;
}

static int
nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
        u32 src_offset = old_mem->start << PAGE_SHIFT;
        u32 dst_offset = new_mem->start << PAGE_SHIFT;
        u32 page_count = new_mem->num_pages;
        int ret;

        ret = RING_SPACE(chan, 3);
        if (ret)
                return ret;

        BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
        OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
        OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));

        while (page_count) {
                int line_count = (page_count > 2047) ? 2047 : page_count;

                ret = RING_SPACE(chan, 11);
                if (ret)
                        return ret;

                BEGIN_RING(chan, NvSubM2MF,
                                 NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
                OUT_RING  (chan, src_offset);
                OUT_RING  (chan, dst_offset);
                OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
                OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
                OUT_RING  (chan, PAGE_SIZE); /* line_length */
                OUT_RING  (chan, line_count);
                OUT_RING  (chan, 0x00000101);
                OUT_RING  (chan, 0x00000000);
                BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
                OUT_RING  (chan, 0);

                page_count -= line_count;
                src_offset += (PAGE_SIZE * line_count);
                dst_offset += (PAGE_SIZE * line_count);
        }

        return 0;
}

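/*
 * Dispatch a hardware copy on the BO's channel if one was supplied,
 * falling back to the kernel channel (taken under its mutex) otherwise,
 * then fence the move so TTM can reclaim the old memory safely.
 */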
static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
                     bool no_wait_reserve, bool no_wait_gpu,
                     struct ttm_mem_reg *new_mem)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct nouveau_channel *chan;
        int ret;

        chan = nvbo->channel;
        if (!chan) {
                chan = dev_priv->channel;
                mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX);
        }

        if (dev_priv->card_type < NV_50)
                ret = nv04_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
        else
        if (dev_priv->card_type < NV_C0)
                ret = nv50_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
        else
                ret = nvc0_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
        if (ret == 0) {
                ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
                                                    no_wait_reserve,
                                                    no_wait_gpu, new_mem);
        }

        if (chan == dev_priv->channel)
                mutex_unlock(&chan->mutex);
        return ret;
}

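/*
 * VRAM<->SYSTEM moves can't be blitted directly, so they bounce through
 * a temporary GART placement: flipd (VRAM->SYSTEM) blits into the
 * bounce buffer first, flips (SYSTEM->VRAM) binds first and blits last.
 */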
static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
                      bool no_wait_reserve, bool no_wait_gpu,
                      struct ttm_mem_reg *new_mem)
{
        u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
        struct ttm_placement placement;
        struct ttm_mem_reg tmp_mem;
        int ret;

        placement.fpfn = placement.lpfn = 0;
        placement.num_placement = placement.num_busy_placement = 1;
        placement.placement = placement.busy_placement = &placement_memtype;

        tmp_mem = *new_mem;
        tmp_mem.mm_node = NULL;
        ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
        if (ret)
                return ret;

        ret = ttm_tt_bind(bo->ttm, &tmp_mem);
        if (ret)
                goto out;

        ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem);
        if (ret)
                goto out;

        ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
out:
        ttm_bo_mem_put(bo, &tmp_mem);
        return ret;
}

static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
                      bool no_wait_reserve, bool no_wait_gpu,
                      struct ttm_mem_reg *new_mem)
{
        u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
        struct ttm_placement placement;
        struct ttm_mem_reg tmp_mem;
        int ret;

        placement.fpfn = placement.lpfn = 0;
        placement.num_placement = placement.num_busy_placement = 1;
        placement.placement = placement.busy_placement = &placement_memtype;

        tmp_mem = *new_mem;
        tmp_mem.mm_node = NULL;
        ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
        if (ret)
                return ret;

        ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem);
        if (ret)
                goto out;

        ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, new_mem);

out:
        ttm_bo_mem_put(bo, &tmp_mem);
        return ret;
}

static void
nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct nouveau_bo *nvbo = nouveau_bo(bo);

        if (dev_priv->card_type < NV_50 || nvbo->no_vm)
                return;

        switch (new_mem->mem_type) {
        case TTM_PL_VRAM:
                nouveau_vm_map(&nvbo->vma, new_mem->mm_node);
                break;
        case TTM_PL_TT:
        default:
                break;
        }
}

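/*
 * Pre-NV50 tiling state is tracked in tile regions: bind allocates one
 * matching the new VRAM placement, cleanup retires the old region once
 * the BO's sync object signals.
 */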
static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
                   struct nouveau_tile_reg **new_tile)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct drm_device *dev = dev_priv->dev;
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        u64 offset = new_mem->start << PAGE_SHIFT;

        *new_tile = NULL;
        if (new_mem->mem_type != TTM_PL_VRAM)
                return 0;

        if (dev_priv->card_type >= NV_10) {
                *new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size,
                                                nvbo->tile_mode,
                                                nvbo->tile_flags);
        }

        return 0;
}

static void
nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
                      struct nouveau_tile_reg *new_tile,
                      struct nouveau_tile_reg **old_tile)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct drm_device *dev = dev_priv->dev;

        nv10_mem_put_tile_region(dev, *old_tile, bo->sync_obj);
        *old_tile = new_tile;
}

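/*
 * Main TTM move hook: fast-path a fake copy for unpopulated system BOs,
 * use the memcpy path before the channel is up, otherwise try the
 * hardware blit and fall back to memcpy on failure.
 */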
static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
                bool no_wait_reserve, bool no_wait_gpu,
                struct ttm_mem_reg *new_mem)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct ttm_mem_reg *old_mem = &bo->mem;
        struct nouveau_tile_reg *new_tile = NULL;
        int ret = 0;

        if (dev_priv->card_type < NV_50) {
                ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
                if (ret)
                        return ret;
        }

        /* Fake bo copy. */
        if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
                BUG_ON(bo->mem.mm_node != NULL);
                bo->mem = *new_mem;
                new_mem->mm_node = NULL;
                goto out;
        }

        /* Software copy if the card isn't up and running yet. */
        if (!dev_priv->channel) {
                ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
                goto out;
        }

        /* Hardware assisted copy. */
        if (new_mem->mem_type == TTM_PL_SYSTEM)
                ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
        else if (old_mem->mem_type == TTM_PL_SYSTEM)
                ret = nouveau_bo_move_flips(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
        else
                ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);

        if (!ret)
                goto out;

        /* Fallback to software copy. */
        ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);

out:
        if (dev_priv->card_type < NV_50) {
                if (ret)
                        nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
                else
                        nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
        }

        return ret;
}

static int
nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
        return 0;
}

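/*
 * Reserve the resources needed to CPU-map a memory region: AGP apertures
 * map directly, NV50+ VRAM goes through a dynamically-allocated BAR1
 * virtual-memory window.
 */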
static int
nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
        struct drm_device *dev = dev_priv->dev;
        int ret;

        mem->bus.addr = NULL;
        mem->bus.offset = 0;
        mem->bus.size = mem->num_pages << PAGE_SHIFT;
        mem->bus.base = 0;
        mem->bus.is_iomem = false;
        if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
                return -EINVAL;
        switch (mem->mem_type) {
        case TTM_PL_SYSTEM:
                /* System memory */
                return 0;
        case TTM_PL_TT:
#if __OS_HAS_AGP
                if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
                        mem->bus.offset = mem->start << PAGE_SHIFT;
                        mem->bus.base = dev_priv->gart_info.aper_base;
                        mem->bus.is_iomem = true;
                }
#endif
                break;
        case TTM_PL_VRAM:
        {
                struct nouveau_vram *vram = mem->mm_node;
                u8 page_shift;

                if (!dev_priv->bar1_vm) {
                        mem->bus.offset = mem->start << PAGE_SHIFT;
                        mem->bus.base = pci_resource_start(dev->pdev, 1);
                        mem->bus.is_iomem = true;
                        break;
                }

                if (dev_priv->card_type == NV_C0)
                        page_shift = vram->page_shift;
                else
                        page_shift = 12;

                ret = nouveau_vm_get(dev_priv->bar1_vm, mem->bus.size,
                                     page_shift, NV_MEM_ACCESS_RW,
                                     &vram->bar_vma);
                if (ret)
                        return ret;

                nouveau_vm_map(&vram->bar_vma, vram);

                mem->bus.offset = vram->bar_vma.offset;
                if (dev_priv->card_type == NV_50) /*XXX*/
                        mem->bus.offset -= 0x0020000000ULL;
                mem->bus.base = pci_resource_start(dev->pdev, 1);
                mem->bus.is_iomem = true;
        }
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
        struct nouveau_vram *vram = mem->mm_node;

        if (!dev_priv->bar1_vm || mem->mem_type != TTM_PL_VRAM)
                return;

        if (!vram->bar_vma.node)
                return;

        nouveau_vm_unmap(&vram->bar_vma);
        nouveau_vm_put(&vram->bar_vma);
}

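/*
 * CPU fault hook: tiled or NV50+ VRAM buffers that sit beyond the
 * CPU-mappable part of the aperture are migrated into the visible
 * window before the fault is satisfied.
 */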
static int
nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct nouveau_bo *nvbo = nouveau_bo(bo);

        /* as long as the bo isn't in vram, and isn't tiled, we've got
         * nothing to do here.
         */
        if (bo->mem.mem_type != TTM_PL_VRAM) {
                if (dev_priv->card_type < NV_50 ||
                    !nouveau_bo_tile_layout(nvbo))
                        return 0;
        }

        /* make sure bo is in mappable vram */
        if (bo->mem.start + bo->mem.num_pages < dev_priv->fb_mappable_pages)
                return 0;

        nvbo->placement.fpfn = 0;
        nvbo->placement.lpfn = dev_priv->fb_mappable_pages;
        nouveau_bo_placement_set(nvbo, TTM_PL_VRAM, 0);
        return nouveau_bo_validate(nvbo, false, true, false);
}

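/*
 * Replace the BO's sync object with a new fence under the bdev fence
 * lock, taking a reference on the new fence before dropping the old.
 */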
void
nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
{
        struct nouveau_fence *old_fence;

        if (likely(fence))
                nouveau_fence_ref(fence);

        spin_lock(&nvbo->bo.bdev->fence_lock);
        old_fence = nvbo->bo.sync_obj;
        nvbo->bo.sync_obj = fence;
        spin_unlock(&nvbo->bo.bdev->fence_lock);

        nouveau_fence_unref(&old_fence);
}

struct ttm_bo_driver nouveau_bo_driver = {
        .create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry,
        .invalidate_caches = nouveau_bo_invalidate_caches,
        .init_mem_type = nouveau_bo_init_mem_type,
        .evict_flags = nouveau_bo_evict_flags,
        .move_notify = nouveau_bo_move_ntfy,
        .move = nouveau_bo_move,
        .verify_access = nouveau_bo_verify_access,
        .sync_obj_signaled = __nouveau_fence_signalled,
        .sync_obj_wait = __nouveau_fence_wait,
        .sync_obj_flush = __nouveau_fence_flush,
        .sync_obj_unref = __nouveau_fence_unref,
        .sync_obj_ref = __nouveau_fence_ref,
        .fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
        .io_mem_reserve = &nouveau_ttm_io_mem_reserve,
        .io_mem_free = &nouveau_ttm_io_mem_free,
};