/*
 * Copyright 2007 Dave Airlied
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *	    Ben Skeggs   <darktama@iinet.net.au>
 *	    Jeremy Kolb  <jkolb@brandeis.edu>
 */

#include <core/engine.h>
#include <linux/swiotlb.h>

#include <subdev/fb.h>
#include <subdev/vm.h>
#include <subdev/bar.h>

#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"

#include "nouveau_bo.h"
#include "nouveau_ttm.h"
#include "nouveau_gem.h"

/*
 * NV10-NV40 tiling helpers
 */

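/*
 * Reprogram tile region @reg: drop the fence guarding its previous
 * state, tear down any old PFB tiling setup, program the new one, and
 * poke the GR and MPEG engines so their tile state stays in sync.
 */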
static void
nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
			   u32 addr, u32 size, u32 pitch, u32 flags)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	int i = reg - drm->tile.reg;
	struct nouveau_fb *pfb = nvkm_fb(&drm->device);
	struct nouveau_fb_tile *tile = &pfb->tile.region[i];
	struct nouveau_engine *engine;

	nouveau_fence_unref(&reg->fence);

	if (tile->pitch)
		pfb->tile.fini(pfb, i, tile);

	if (pitch)
		pfb->tile.init(pfb, i, addr, size, pitch, flags, tile);

	pfb->tile.prog(pfb, i, tile);

	if ((engine = nouveau_engine(pfb, NVDEV_ENGINE_GR)))
		engine->tile_prog(engine, i);
	if ((engine = nouveau_engine(pfb, NVDEV_ENGINE_MPEG)))
		engine->tile_prog(engine, i);
}

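/*
 * Try to claim tile region @i.  Returns the region if it is unused and
 * any fence left by its previous user has signalled, or NULL if it is
 * still busy.
 */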
static struct nouveau_drm_tile *
nv10_bo_get_tile_region(struct drm_device *dev, int i)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_drm_tile *tile = &drm->tile.reg[i];

	spin_lock(&drm->tile.lock);

	if (!tile->used &&
	    (!tile->fence || nouveau_fence_done(tile->fence)))
		tile->used = true;
	else
		tile = NULL;

	spin_unlock(&drm->tile.lock);
	return tile;
}

static void
nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
			struct nouveau_fence *fence)
{
	struct nouveau_drm *drm = nouveau_drm(dev);

	if (tile) {
		spin_lock(&drm->tile.lock);
		tile->fence = nouveau_fence_ref(fence);
		tile->used = false;
		spin_unlock(&drm->tile.lock);
	}
}

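/*
 * Scan the PFB tile regions for a free slot: the first free region is
 * claimed and programmed with the new parameters, while any other free
 * region still carrying stale tiling state is cleared along the way.
 * Returns the programmed region, or NULL if none was available.
 */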
static struct nouveau_drm_tile *
nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
		   u32 size, u32 pitch, u32 flags)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_fb *pfb = nvkm_fb(&drm->device);
	struct nouveau_drm_tile *tile, *found = NULL;
	int i;

	for (i = 0; i < pfb->tile.regions; i++) {
		tile = nv10_bo_get_tile_region(dev, i);

		if (pitch && !found) {
			found = tile;
			continue;

		} else if (tile && pfb->tile.region[i].pitch) {
			/* Kill an unused tile region. */
			nv10_bo_update_tile_region(dev, tile, 0, 0, 0, 0);
		}

		nv10_bo_put_tile_region(dev, tile, NULL);
	}

	if (found)
		nv10_bo_update_tile_region(dev, found, addr, size,
					   pitch, flags);
	return found;
}

static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	if (unlikely(nvbo->gem.filp))
		DRM_ERROR("bo %p still attached to GEM object\n", bo);
	WARN_ON(nvbo->pin_refcnt > 0);
	nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
	kfree(nvbo);
}

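/*
 * Round the requested size and alignment up to what the chipset needs:
 * pre-Tesla parts with a tile_mode get chipset-dependent alignments and
 * tile-mode-dependent size rounding, Tesla and later align to the
 * buffer's VM page size, and everything is then rounded up to PAGE_SIZE.
 */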
static void
nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
		       int *align, int *size)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct nvif_device *device = &drm->device;

	if (device->info.family < NV_DEVICE_INFO_V0_TESLA) {
		if (nvbo->tile_mode) {
			if (device->info.chipset >= 0x40) {
				*align = 65536;
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (device->info.chipset >= 0x30) {
				*align = 32768;
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (device->info.chipset >= 0x20) {
				*align = 16384;
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (device->info.chipset >= 0x10) {
				*align = 16384;
				*size = roundup(*size, 32 * nvbo->tile_mode);
			}
		}
	} else {
		*size = roundup(*size, (1 << nvbo->page_shift));
		*align = max((1 << nvbo->page_shift), *align);
	}

	*size = roundup(*size, PAGE_SIZE);
}

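/*
 * Allocate and initialise a nouveau_bo, fix up its size and alignment
 * for the chipset, and hand it to TTM to provide backing storage.  On
 * failure TTM invokes nouveau_bo_del_ttm, which frees the object.
 */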
int
nouveau_bo_new(struct drm_device *dev, int size, int align,
	       uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
	       struct sg_table *sg,
	       struct nouveau_bo **pnvbo)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_bo *nvbo;
	size_t acc_size;
	int ret;
	int type = ttm_bo_type_device;
	int lpg_shift = 12;
	int max_size;

	if (drm->client.vm)
		lpg_shift = drm->client.vm->vmm->lpg_shift;
	max_size = INT_MAX & ~((1 << lpg_shift) - 1);

	if (size <= 0 || size > max_size) {
		NV_WARN(drm, "skipped size %x\n", (u32)size);
		return -EINVAL;
	}

	if (sg)
		type = ttm_bo_type_sg;

	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
	if (!nvbo)
		return -ENOMEM;
	INIT_LIST_HEAD(&nvbo->head);
	INIT_LIST_HEAD(&nvbo->entry);
	INIT_LIST_HEAD(&nvbo->vma_list);
	nvbo->tile_mode = tile_mode;
	nvbo->tile_flags = tile_flags;
	nvbo->bo.bdev = &drm->ttm.bdev;

	nvbo->page_shift = 12;
	if (drm->client.vm) {
		if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
			nvbo->page_shift = drm->client.vm->vmm->lpg_shift;
	}

	nouveau_bo_fixup_align(nvbo, flags, &align, &size);
	nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
	nouveau_bo_placement_set(nvbo, flags, 0);

	acc_size = ttm_bo_dma_acc_size(&drm->ttm.bdev, size,
				       sizeof(struct nouveau_bo));

	ret = ttm_bo_init(&drm->ttm.bdev, &nvbo->bo, size,
			  type, &nvbo->placement,
			  align >> PAGE_SHIFT, false, NULL, acc_size, sg,
			  nouveau_bo_del_ttm);
	if (ret) {
		/* ttm will call nouveau_bo_del_ttm if it fails.. */
		return ret;
	}

	*pnvbo = nvbo;
	return 0;
}

static void
set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags)
{
	*n = 0;

	if (type & TTM_PL_FLAG_VRAM)
		pl[(*n)++] = TTM_PL_FLAG_VRAM | flags;
	if (type & TTM_PL_FLAG_TT)
		pl[(*n)++] = TTM_PL_FLAG_TT | flags;
	if (type & TTM_PL_FLAG_SYSTEM)
		pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags;
}

static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct nouveau_fb *pfb = nvkm_fb(&drm->device);
	u32 vram_pages = pfb->ram->size >> PAGE_SHIFT;

	if (drm->device.info.family == NV_DEVICE_INFO_V0_CELSIUS &&
	    nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
	    nvbo->bo.mem.num_pages < vram_pages / 4) {
		/*
		 * Make sure that the color and depth buffers are handled
		 * by independent memory controller units. Up to a 9x
		 * speed up when alpha-blending and depth-test are enabled
		 * at the same time.
		 */
		if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
			nvbo->placement.fpfn = vram_pages / 2;
			nvbo->placement.lpfn = ~0;
		} else {
			nvbo->placement.fpfn = 0;
			nvbo->placement.lpfn = vram_pages / 2;
		}
	}
}

void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
{
	struct ttm_placement *pl = &nvbo->placement;
	uint32_t flags = TTM_PL_MASK_CACHING |
		(nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);

	pl->placement = nvbo->placements;
	set_placement_list(nvbo->placements, &pl->num_placement,
			   type, flags);

	pl->busy_placement = nvbo->busy_placements;
	set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
			   type | busy, flags);

	set_placement_range(nvbo, type);
}

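/*
 * Pin a buffer into @memtype, making it immovable while pinned.  Nested
 * pins only bump pin_refcnt; the first pin revalidates placement and
 * adjusts the available VRAM/GART accounting.  Pinning a buffer that is
 * already pinned into a different memory type fails with -EINVAL.
 */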
int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	ret = ttm_bo_reserve(bo, false, false, false, NULL);
	if (ret)
		goto out;

	if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
		NV_ERROR(drm, "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
			 1 << bo->mem.mem_type, memtype);
		ret = -EINVAL;
		goto out;
	}

	if (nvbo->pin_refcnt++)
		goto out;

	nouveau_bo_placement_set(nvbo, memtype, 0);

	ret = nouveau_bo_validate(nvbo, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			drm->gem.vram_available -= bo->mem.size;
			break;
		case TTM_PL_TT:
			drm->gem.gart_available -= bo->mem.size;
			break;
		default:
			break;
		}
	}
out:
	ttm_bo_unreserve(bo);
	return ret;
}

int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret, ref;

	ret = ttm_bo_reserve(bo, false, false, false, NULL);
	if (ret)
		return ret;

	ref = --nvbo->pin_refcnt;
	WARN_ON_ONCE(ref < 0);
	if (ref)
		goto out;

	nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);

	ret = nouveau_bo_validate(nvbo, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			drm->gem.vram_available += bo->mem.size;
			break;
		case TTM_PL_TT:
			drm->gem.gart_available += bo->mem.size;
			break;
		default:
			break;
		}
	}

out:
	ttm_bo_unreserve(bo);
	return ret;
}

int
nouveau_bo_map(struct nouveau_bo *nvbo)
{
	int ret;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL);
	if (ret)
		return ret;

	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}

void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
	if (nvbo)
		ttm_bo_kunmap(&nvbo->kmap);
}

int
nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
		    bool no_wait_gpu)
{
	int ret;

	ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
			      interruptible, no_wait_gpu);
	if (ret)
		return ret;

	return 0;
}

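/*
 * Kmap accessors: the buffer must currently be mapped through
 * nouveau_bo_map().  Accesses use the native ioread and iowrite
 * variants when the kmap is an iomem mapping, and plain loads and
 * stores otherwise.
 */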
u16
nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		return ioread16_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		iowrite16_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

u32
nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		return ioread32_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		iowrite32_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

static struct ttm_tt *
nouveau_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
		      uint32_t page_flags, struct page *dummy_read)
{
#if __OS_HAS_AGP
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct drm_device *dev = drm->dev;

	if (drm->agp.stat == ENABLED) {
		return ttm_agp_tt_create(bdev, dev->agp->bridge, size,
					 page_flags, dummy_read);
	}
#endif

	return nouveau_sgdma_create_ttm(bdev, size, page_flags, dummy_read);
}

static int
nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	/* We'll do this from user space. */
	return 0;
}

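/*
 * Describe each TTM memory type to the core: which caching modes it
 * supports, whether it is CPU-mappable, and which memory manager backs
 * it.  The VRAM and GART managers differ by chipset generation and by
 * whether AGP is in use.
 */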
static int
nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			 struct ttm_mem_type_manager *man)
{
	struct nouveau_drm *drm = nouveau_bdev(bdev);

	switch (type) {
	case TTM_PL_SYSTEM:
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED |
					 TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;

		if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
			/* Some BARs do not support being ioremapped WC */
			if (nvkm_bar(&drm->device)->iomap_uncached) {
				man->available_caching = TTM_PL_FLAG_UNCACHED;
				man->default_caching = TTM_PL_FLAG_UNCACHED;
			}

			man->func = &nouveau_vram_manager;
			man->io_reserve_fastpath = false;
			man->use_io_reserve_lru = true;
		} else {
			man->func = &ttm_bo_manager_func;
		}
		break;
	case TTM_PL_TT:
		if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
			man->func = &nouveau_gart_manager;
		else
		if (drm->agp.stat != ENABLED)
			man->func = &nv04_gart_manager;
		else
			man->func = &ttm_bo_manager_func;

		if (drm->agp.stat == ENABLED) {
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
			man->available_caching = TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_WC;
			man->default_caching = TTM_PL_FLAG_WC;
		} else {
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
				     TTM_MEMTYPE_FLAG_CMA;
			man->available_caching = TTM_PL_MASK_CACHING;
			man->default_caching = TTM_PL_FLAG_CACHED;
		}

		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
					 TTM_PL_FLAG_SYSTEM);
		break;
	default:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
		break;
	}

	*pl = nvbo->placement;
}


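/*
 * Per-generation copy-engine setup (the various move_init functions
 * bind the class and notifier objects a channel needs) and the
 * corresponding copy routines used to move buffer contents between
 * placements.
 */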
static int
nve0_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 2);
	if (ret == 0) {
		BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
		OUT_RING (chan, handle & 0x0000ffff);
		FIRE_RING (chan);
	}
	return ret;
}

static int
nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	int ret = RING_SPACE(chan, 10);
	if (ret == 0) {
		BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8);
		OUT_RING (chan, upper_32_bits(node->vma[0].offset));
		OUT_RING (chan, lower_32_bits(node->vma[0].offset));
		OUT_RING (chan, upper_32_bits(node->vma[1].offset));
		OUT_RING (chan, lower_32_bits(node->vma[1].offset));
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, new_mem->num_pages);
		BEGIN_IMC0(chan, NvSubCopy, 0x0300, 0x0386);
	}
	return ret;
}

static int
nvc0_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 2);
	if (ret == 0) {
		BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
		OUT_RING (chan, handle);
	}
	return ret;
}

static int
nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	u32 page_count = new_mem->num_pages;
	int ret;

	page_count = new_mem->num_pages;
	while (page_count) {
		int line_count = (page_count > 8191) ? 8191 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_NVC0(chan, NvSubCopy, 0x030c, 8);
		OUT_RING (chan, upper_32_bits(src_offset));
		OUT_RING (chan, lower_32_bits(src_offset));
		OUT_RING (chan, upper_32_bits(dst_offset));
		OUT_RING (chan, lower_32_bits(dst_offset));
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, line_count);
		BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
		OUT_RING (chan, 0x00000110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

static int
nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	u32 page_count = new_mem->num_pages;
	int ret;

	page_count = new_mem->num_pages;
	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 12);
		if (ret)
			return ret;

		BEGIN_NVC0(chan, NvSubCopy, 0x0238, 2);
		OUT_RING (chan, upper_32_bits(dst_offset));
		OUT_RING (chan, lower_32_bits(dst_offset));
		BEGIN_NVC0(chan, NvSubCopy, 0x030c, 6);
		OUT_RING (chan, upper_32_bits(src_offset));
		OUT_RING (chan, lower_32_bits(src_offset));
		OUT_RING (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING (chan, PAGE_SIZE); /* line_length */
		OUT_RING (chan, line_count);
		BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
		OUT_RING (chan, 0x00100110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

static int
nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	u32 page_count = new_mem->num_pages;
	int ret;

	page_count = new_mem->num_pages;
	while (page_count) {
		int line_count = (page_count > 8191) ? 8191 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
		OUT_RING (chan, upper_32_bits(src_offset));
		OUT_RING (chan, lower_32_bits(src_offset));
		OUT_RING (chan, upper_32_bits(dst_offset));
		OUT_RING (chan, lower_32_bits(dst_offset));
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, line_count);
		BEGIN_NV04(chan, NvSubCopy, 0x0300, 1);
		OUT_RING (chan, 0x00000110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

static int
nv98_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	int ret = RING_SPACE(chan, 7);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0320, 6);
		OUT_RING (chan, upper_32_bits(node->vma[0].offset));
		OUT_RING (chan, lower_32_bits(node->vma[0].offset));
		OUT_RING (chan, upper_32_bits(node->vma[1].offset));
		OUT_RING (chan, lower_32_bits(node->vma[1].offset));
		OUT_RING (chan, 0x00000000 /* COPY */);
		OUT_RING (chan, new_mem->num_pages << PAGE_SHIFT);
	}
	return ret;
}

static int
nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	int ret = RING_SPACE(chan, 7);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0304, 6);
		OUT_RING (chan, new_mem->num_pages << PAGE_SHIFT);
		OUT_RING (chan, upper_32_bits(node->vma[0].offset));
		OUT_RING (chan, lower_32_bits(node->vma[0].offset));
		OUT_RING (chan, upper_32_bits(node->vma[1].offset));
		OUT_RING (chan, lower_32_bits(node->vma[1].offset));
		OUT_RING (chan, 0x00000000 /* MODE_COPY, QUERY_NONE */);
	}
	return ret;
}

static int
nv50_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 6);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
		OUT_RING (chan, handle);
		BEGIN_NV04(chan, NvSubCopy, 0x0180, 3);
		OUT_RING (chan, NvNotify0);
		OUT_RING (chan, NvDmaFB);
		OUT_RING (chan, NvDmaFB);
	}

	return ret;
}

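/*
 * NV50 M2MF copy: moves data in chunks of up to 4MiB, programming the
 * source and destination as tiled or linear according to the memtype
 * of each memory node.
 */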
static int
nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	u64 length = (new_mem->num_pages << PAGE_SHIFT);
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	int src_tiled = !!node->memtype;
	int dst_tiled = !!((struct nouveau_mem *)new_mem->mm_node)->memtype;
	int ret;

	while (length) {
		u32 amount, stride, height;

		ret = RING_SPACE(chan, 18 + 6 * (src_tiled + dst_tiled));
		if (ret)
			return ret;

		amount = min(length, (u64)(4 * 1024 * 1024));
		stride = 16 * 4;
		height = amount / stride;

		if (src_tiled) {
			BEGIN_NV04(chan, NvSubCopy, 0x0200, 7);
			OUT_RING (chan, 0);
			OUT_RING (chan, 0);
			OUT_RING (chan, stride);
			OUT_RING (chan, height);
			OUT_RING (chan, 1);
			OUT_RING (chan, 0);
			OUT_RING (chan, 0);
		} else {
			BEGIN_NV04(chan, NvSubCopy, 0x0200, 1);
			OUT_RING (chan, 1);
		}
		if (dst_tiled) {
			BEGIN_NV04(chan, NvSubCopy, 0x021c, 7);
			OUT_RING (chan, 0);
			OUT_RING (chan, 0);
			OUT_RING (chan, stride);
			OUT_RING (chan, height);
			OUT_RING (chan, 1);
			OUT_RING (chan, 0);
			OUT_RING (chan, 0);
		} else {
			BEGIN_NV04(chan, NvSubCopy, 0x021c, 1);
			OUT_RING (chan, 1);
		}

		BEGIN_NV04(chan, NvSubCopy, 0x0238, 2);
		OUT_RING (chan, upper_32_bits(src_offset));
		OUT_RING (chan, upper_32_bits(dst_offset));
		BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
		OUT_RING (chan, lower_32_bits(src_offset));
		OUT_RING (chan, lower_32_bits(dst_offset));
		OUT_RING (chan, stride);
		OUT_RING (chan, stride);
		OUT_RING (chan, stride);
		OUT_RING (chan, height);
		OUT_RING (chan, 0x00000101);
		OUT_RING (chan, 0x00000000);
		BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING (chan, 0);

		length -= amount;
		src_offset += amount;
		dst_offset += amount;
	}

	return 0;
}

static int
nv04_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 4);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
		OUT_RING (chan, handle);
		BEGIN_NV04(chan, NvSubCopy, 0x0180, 1);
		OUT_RING (chan, NvNotify0);
	}

	return ret;
}

static inline uint32_t
nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
		      struct nouveau_channel *chan, struct ttm_mem_reg *mem)
{
	if (mem->mem_type == TTM_PL_TT)
		return NvDmaTT;
	return NvDmaFB;
}

static int
nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	u32 src_offset = old_mem->start << PAGE_SHIFT;
	u32 dst_offset = new_mem->start << PAGE_SHIFT;
	u32 page_count = new_mem->num_pages;
	int ret;

	ret = RING_SPACE(chan, 3);
	if (ret)
		return ret;

	BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
	OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
	OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));

	page_count = new_mem->num_pages;
	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_NV04(chan, NvSubCopy,
			   NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
		OUT_RING (chan, src_offset);
		OUT_RING (chan, dst_offset);
		OUT_RING (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING (chan, PAGE_SIZE); /* line_length */
		OUT_RING (chan, line_count);
		OUT_RING (chan, 0x00000101);
		OUT_RING (chan, 0x00000000);
		BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING (chan, 0);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

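/*
 * Allocate temporary VMAs covering the source and destination of an
 * upcoming copy and map both nodes through them so the copy engine can
 * address the data: vma[0] maps the old backing store, vma[1] the new.
 */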
static int
nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo,
		     struct ttm_mem_reg *mem)
{
	struct nouveau_mem *old_node = bo->mem.mm_node;
	struct nouveau_mem *new_node = mem->mm_node;
	u64 size = (u64)mem->num_pages << PAGE_SHIFT;
	int ret;

	ret = nouveau_vm_get(drm->client.vm, size, old_node->page_shift,
			     NV_MEM_ACCESS_RW, &old_node->vma[0]);
	if (ret)
		return ret;

	ret = nouveau_vm_get(drm->client.vm, size, new_node->page_shift,
			     NV_MEM_ACCESS_RW, &old_node->vma[1]);
	if (ret) {
		nouveau_vm_put(&old_node->vma[0]);
		return ret;
	}

	nouveau_vm_map(&old_node->vma[0], old_node);
	nouveau_vm_map(&old_node->vma[1], new_node);
	return 0;
}

static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
		     bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_channel *chan = drm->ttm.chan;
	struct nouveau_fence *fence;
	int ret;

	/* create temporary vmas for the transfer and attach them to the
	 * old nouveau_mem node, these will get cleaned up after ttm has
	 * destroyed the ttm_mem_reg
	 */
	if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
		ret = nouveau_bo_move_prep(drm, bo, new_mem);
		if (ret)
			return ret;
	}

	mutex_lock_nested(&chan->cli->mutex, SINGLE_DEPTH_NESTING);
	ret = nouveau_fence_sync(bo->sync_obj, chan);
	if (ret == 0) {
		ret = drm->ttm.move(chan, bo, &bo->mem, new_mem);
		if (ret == 0) {
			ret = nouveau_fence_new(chan, false, &fence);
			if (ret == 0) {
				ret = ttm_bo_move_accel_cleanup(bo, fence,
								evict,
								no_wait_gpu,
								new_mem);
				nouveau_fence_unref(&fence);
			}
		}
	}
	mutex_unlock(&chan->cli->mutex);
	return ret;
}

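/*
 * Probe for the most capable copy method the hardware exposes, from
 * dedicated copy engines down to M2MF, binding the winning class to a
 * channel.  If no method initialises, buffer moves fall back to CPU
 * copies.
 */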
void
nouveau_bo_move_init(struct nouveau_drm *drm)
{
	static const struct {
		const char *name;
		int engine;
		u32 oclass;
		int (*exec)(struct nouveau_channel *,
			    struct ttm_buffer_object *,
			    struct ttm_mem_reg *, struct ttm_mem_reg *);
		int (*init)(struct nouveau_channel *, u32 handle);
	} _methods[] = {
		{  "COPY", 4, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{ "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init },
		{ "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 0, 0x85b5, nva3_bo_move_copy, nv50_bo_move_init },
		{ "CRYPT", 0, 0x74c1, nv84_bo_move_exec, nv50_bo_move_init },
		{  "M2MF", 0, 0x9039, nvc0_bo_move_m2mf, nvc0_bo_move_init },
		{  "M2MF", 0, 0x5039, nv50_bo_move_m2mf, nv50_bo_move_init },
		{  "M2MF", 0, 0x0039, nv04_bo_move_m2mf, nv04_bo_move_init },
		{},
		{ "CRYPT", 0, 0x88b4, nv98_bo_move_exec, nv50_bo_move_init },
	}, *mthd = _methods;
	const char *name = "CPU";
	int ret;

	do {
		struct nouveau_object *object;
		struct nouveau_channel *chan;
		u32 handle = (mthd->engine << 16) | mthd->oclass;

		if (mthd->engine)
			chan = drm->cechan;
		else
			chan = drm->channel;
		if (chan == NULL)
			continue;

		ret = nouveau_object_new(nv_object(drm), chan->handle, handle,
					 mthd->oclass, NULL, 0, &object);
		if (ret == 0) {
			ret = mthd->init(chan, handle);
			if (ret) {
				nouveau_object_del(nv_object(drm),
						   chan->handle, handle);
				continue;
			}

			drm->ttm.move = mthd->exec;
			drm->ttm.chan = chan;
			name = mthd->name;
			break;
		}
	} while ((++mthd)->exec);

	NV_INFO(drm, "MM: using %s for buffer copies\n", name);
}

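/*
 * VRAM to system move: bounce the data through a GART placement so the
 * copy engine can reach it, then let TTM finish the move into system
 * memory.
 */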
static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_tt_bind(bo->ttm, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, &tmp_mem);
	if (ret)
		goto out;

	ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem);
out:
	ttm_bo_mem_put(bo, &tmp_mem);
	return ret;
}

static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, new_mem);
	if (ret)
		goto out;

out:
	ttm_bo_mem_put(bo, &tmp_mem);
	return ret;
}

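/*
 * Keep GPU virtual mappings in step with a move: remap each VMA onto
 * the new backing store where the new placement allows it, and unmap
 * it otherwise.
 */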
static void
nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_vma *vma;

	/* ttm can now (stupidly) pass the driver bos it didn't create... */
	if (bo->destroy != nouveau_bo_del_ttm)
		return;

	list_for_each_entry(vma, &nvbo->vma_list, head) {
		if (new_mem && new_mem->mem_type != TTM_PL_SYSTEM &&
		    (new_mem->mem_type == TTM_PL_VRAM ||
		     nvbo->page_shift != vma->vm->vmm->lpg_shift)) {
			nouveau_vm_map(vma, new_mem->mm_node);
		} else {
			nouveau_vm_unmap(vma);
		}
	}
}

static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
		   struct nouveau_drm_tile **new_tile)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	u64 offset = new_mem->start << PAGE_SHIFT;

	*new_tile = NULL;
	if (new_mem->mem_type != TTM_PL_VRAM)
		return 0;

	if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
		*new_tile = nv10_bo_set_tiling(dev, offset, new_mem->size,
					       nvbo->tile_mode,
					       nvbo->tile_flags);
	}

	return 0;
}

static void
nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
		      struct nouveau_drm_tile *new_tile,
		      struct nouveau_drm_tile **old_tile)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;

	nv10_bo_put_tile_region(dev, *old_tile, bo->sync_obj);
	*old_tile = new_tile;
}

static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
		bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct nouveau_drm_tile *new_tile = NULL;
	int ret = 0;

	if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
		ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
		if (ret)
			return ret;
	}

	/* Fake bo copy. */
	if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
		BUG_ON(bo->mem.mm_node != NULL);
		bo->mem = *new_mem;
		new_mem->mm_node = NULL;
		goto out;
	}

	/* Hardware assisted copy. */
	if (drm->ttm.move) {
		if (new_mem->mem_type == TTM_PL_SYSTEM)
			ret = nouveau_bo_move_flipd(bo, evict, intr,
						    no_wait_gpu, new_mem);
		else if (old_mem->mem_type == TTM_PL_SYSTEM)
			ret = nouveau_bo_move_flips(bo, evict, intr,
						    no_wait_gpu, new_mem);
		else
			ret = nouveau_bo_move_m2mf(bo, evict, intr,
						   no_wait_gpu, new_mem);
		if (!ret)
			goto out;
	}

	/* Fallback to software copy. */
	spin_lock(&bo->bdev->fence_lock);
	ret = ttm_bo_wait(bo, true, intr, no_wait_gpu);
	spin_unlock(&bo->bdev->fence_lock);
	if (ret == 0)
		ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);

out:
	if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
		if (ret)
			nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
		else
			nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
	}

	return ret;
}

static int
nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	return drm_vma_node_verify_access(&nvbo->gem.vma_node, filp);
}

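/*
 * Tell TTM how a buffer is mapped for CPU access: system memory needs
 * no setup, AGP placements map through the aperture, and VRAM (or
 * tiled GART memory on Tesla and up) maps through BAR1, using a BAR
 * VMA on Tesla and later.
 */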
static int
nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct nouveau_mem *node = mem->mm_node;
	struct drm_device *dev = drm->dev;
	int ret;

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		return 0;
	case TTM_PL_TT:
#if __OS_HAS_AGP
		if (drm->agp.stat == ENABLED) {
			mem->bus.offset = mem->start << PAGE_SHIFT;
			mem->bus.base = drm->agp.base;
			mem->bus.is_iomem = !dev->agp->cant_use_aperture;
		}
#endif
		if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA || !node->memtype)
			/* untiled */
			break;
		/* fallthrough, tiled memory */
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		mem->bus.base = nv_device_resource_start(nvkm_device(&drm->device), 1);
		mem->bus.is_iomem = true;
		if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
			struct nouveau_bar *bar = nvkm_bar(&drm->device);

			ret = bar->umap(bar, node, NV_MEM_ACCESS_RW,
					&node->bar_vma);
			if (ret)
				return ret;

			mem->bus.offset = node->bar_vma.offset;
		}
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

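/*
 * Sketch only: TTM consumes the mem->bus fields filled in above when
 * mapping an aperture for CPU access, roughly as in this hypothetical
 * helper (TTM's real code also handles cached system memory):
 */
static void __iomem *
example_map_bus_window(struct ttm_mem_reg *mem)
{
	if (!mem->bus.is_iomem)
		return NULL;	/* plain system RAM, nothing to ioremap */
	return ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
}
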
static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct nouveau_bar *bar = nvkm_bar(&drm->device);
	struct nouveau_mem *node = mem->mm_node;

	if (!node->bar_vma.node)
		return;

	bar->unmap(bar, &node->bar_vma);
}

static int
nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nvif_device *device = &drm->device;
	u32 mappable = nv_device_resource_len(nvkm_device(device), 1) >> PAGE_SHIFT;
	int ret;

	/* as long as the bo isn't in vram, and isn't tiled, we've got
	 * nothing to do here.
	 */
	if (bo->mem.mem_type != TTM_PL_VRAM) {
		if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA ||
		    !nouveau_bo_tile_layout(nvbo))
			return 0;

		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
			nouveau_bo_placement_set(nvbo, TTM_PL_TT, 0);

			ret = nouveau_bo_validate(nvbo, false, false);
			if (ret)
				return ret;
		}
		return 0;
	}

	/* make sure bo is in mappable vram */
	if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA ||
	    bo->mem.start + bo->mem.num_pages < mappable)
		return 0;

	nvbo->placement.fpfn = 0;
	nvbo->placement.lpfn = mappable;
	nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
	return nouveau_bo_validate(nvbo, false, false);
}

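/*
 * Hypothetical helper (sketch, not in the original file) spelling out
 * the mappable test above: BAR1 is `mappable' pages long, so a BO is
 * CPU-visible only if it ends inside that window:
 */
static bool
example_bo_in_bar1(struct nouveau_bo *nvbo, u32 mappable)
{
	return nvbo->bo.mem.start + nvbo->bo.mem.num_pages < mappable;
}
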
static int
nouveau_ttm_tt_populate(struct ttm_tt *ttm)
{
	struct ttm_dma_tt *ttm_dma = (void *)ttm;
	struct nouveau_drm *drm;
	struct nouveau_device *device;
	struct drm_device *dev;
	struct device *pdev;
	unsigned i;
	int r;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (ttm->state != tt_unpopulated)
		return 0;

	if (slave && ttm->sg) {
		/* make userspace faulting work */
		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
						 ttm_dma->dma_address, ttm->num_pages);
		ttm->state = tt_unbound;
		return 0;
	}

	drm = nouveau_bdev(ttm->bdev);
	device = nvkm_device(&drm->device);
	dev = drm->dev;
	pdev = nv_device_base(device);

#if __OS_HAS_AGP
	if (drm->agp.stat == ENABLED) {
		return ttm_agp_tt_populate(ttm);
	}
#endif

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		return ttm_dma_populate((void *)ttm, dev->dev);
	}
#endif

	r = ttm_pool_populate(ttm);
	if (r) {
		return r;
	}

	for (i = 0; i < ttm->num_pages; i++) {
		dma_addr_t addr;

		addr = dma_map_page(pdev, ttm->pages[i], 0, PAGE_SIZE,
				    DMA_BIDIRECTIONAL);

		if (dma_mapping_error(pdev, addr)) {
			/* Unwind every page mapped so far, including
			 * page 0 (i-- rather than --i, which skipped
			 * index 0 and wrapped when i was 0). */
			while (i--) {
				dma_unmap_page(pdev, ttm_dma->dma_address[i],
					       PAGE_SIZE, DMA_BIDIRECTIONAL);
				ttm_dma->dma_address[i] = 0;
			}
			ttm_pool_unpopulate(ttm);
			return -EFAULT;
		}

		ttm_dma->dma_address[i] = addr;
	}
	return 0;
}

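/*
 * Minimal sketch of the streaming-DMA idiom used above, outside any
 * TTM context (assumes caller-provided page and address arrays of
 * length n; unwinds fully on failure):
 */
static int
example_map_pages(struct device *dev, struct page **pages,
		  dma_addr_t *addrs, unsigned n)
{
	unsigned i;

	for (i = 0; i < n; i++) {
		addrs[i] = dma_map_page(dev, pages[i], 0, PAGE_SIZE,
					DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, addrs[i])) {
			while (i--)
				dma_unmap_page(dev, addrs[i], PAGE_SIZE,
					       DMA_BIDIRECTIONAL);
			return -EFAULT;
		}
	}
	return 0;
}
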
static void
nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	struct ttm_dma_tt *ttm_dma = (void *)ttm;
	struct nouveau_drm *drm;
	struct nouveau_device *device;
	struct drm_device *dev;
	struct device *pdev;
	unsigned i;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (slave)
		return;

	drm = nouveau_bdev(ttm->bdev);
	device = nvkm_device(&drm->device);
	dev = drm->dev;
	pdev = nv_device_base(device);

#if __OS_HAS_AGP
	if (drm->agp.stat == ENABLED) {
		ttm_agp_tt_unpopulate(ttm);
		return;
	}
#endif

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		ttm_dma_unpopulate((void *)ttm, dev->dev);
		return;
	}
#endif

	for (i = 0; i < ttm->num_pages; i++) {
		if (ttm_dma->dma_address[i]) {
			dma_unmap_page(pdev, ttm_dma->dma_address[i], PAGE_SIZE,
				       DMA_BIDIRECTIONAL);
		}
	}

	ttm_pool_unpopulate(ttm);
}

void
nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
{
	struct nouveau_fence *new_fence = nouveau_fence_ref(fence);
	struct nouveau_fence *old_fence = NULL;

	spin_lock(&nvbo->bo.bdev->fence_lock);
	old_fence = nvbo->bo.sync_obj;
	nvbo->bo.sync_obj = new_fence;
	spin_unlock(&nvbo->bo.bdev->fence_lock);

	nouveau_fence_unref(&old_fence);
}

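/*
 * Hypothetical call site (sketch): after emitting GPU work that
 * touches the BO, the resulting fence is attached so TTM waits on it
 * before moving or destroying the buffer:
 *
 *	struct nouveau_fence *fence;
 *	ret = nouveau_fence_new(chan, false, &fence);
 *	if (ret == 0) {
 *		nouveau_bo_fence(nvbo, fence);
 *		nouveau_fence_unref(&fence);
 *	}
 */
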
/* TTM sync_obj hooks: thin trampolines between TTM's untyped void
 * pointers and nouveau's typed fence API. */
static void
nouveau_bo_fence_unref(void **sync_obj)
{
	nouveau_fence_unref((struct nouveau_fence **)sync_obj);
}

static void *
nouveau_bo_fence_ref(void *sync_obj)
{
	return nouveau_fence_ref(sync_obj);
}

static bool
nouveau_bo_fence_signalled(void *sync_obj)
{
	return nouveau_fence_done(sync_obj);
}

static int
nouveau_bo_fence_wait(void *sync_obj, bool lazy, bool intr)
{
	return nouveau_fence_wait(sync_obj, lazy, intr);
}

static int
nouveau_bo_fence_flush(void *sync_obj)
{
	return 0;
}

struct ttm_bo_driver nouveau_bo_driver = {
	.ttm_tt_create = nouveau_ttm_tt_create,
	.ttm_tt_populate = nouveau_ttm_tt_populate,
	.ttm_tt_unpopulate = nouveau_ttm_tt_unpopulate,
	.invalidate_caches = nouveau_bo_invalidate_caches,
	.init_mem_type = nouveau_bo_init_mem_type,
	.evict_flags = nouveau_bo_evict_flags,
	.move_notify = nouveau_bo_move_ntfy,
	.move = nouveau_bo_move,
	.verify_access = nouveau_bo_verify_access,
	.sync_obj_signaled = nouveau_bo_fence_signalled,
	.sync_obj_wait = nouveau_bo_fence_wait,
	.sync_obj_flush = nouveau_bo_fence_flush,
	.sync_obj_unref = nouveau_bo_fence_unref,
	.sync_obj_ref = nouveau_bo_fence_ref,
	.fault_reserve_notify = nouveau_ttm_fault_reserve_notify,
	.io_mem_reserve = nouveau_ttm_io_mem_reserve,
	.io_mem_free = nouveau_ttm_io_mem_free,
};

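/*
 * Sketch of how this driver table is wired up, modelled on the call in
 * nouveau_ttm.c (argument values here are illustrative only):
 *
 *	ret = ttm_bo_device_init(&drm->ttm.bdev,
 *				 drm->ttm.bo_global_ref.ref.object,
 *				 &nouveau_bo_driver,
 *				 dev->anon_inode->i_mapping,
 *				 DRM_FILE_PAGE_OFFSET,
 *				 bits <= 32);
 */
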
struct nouveau_vma *
nouveau_bo_vma_find(struct nouveau_bo *nvbo, struct nouveau_vm *vm)
{
	struct nouveau_vma *vma;
	list_for_each_entry(vma, &nvbo->vma_list, head) {
		if (vma->vm == vm)
			return vma;
	}

	return NULL;
}

int
nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm,
		   struct nouveau_vma *vma)
{
	const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
	int ret;

	ret = nouveau_vm_get(vm, size, nvbo->page_shift,
			     NV_MEM_ACCESS_RW, vma);
	if (ret)
		return ret;

	if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM &&
	    (nvbo->bo.mem.mem_type == TTM_PL_VRAM ||
	     nvbo->page_shift != vma->vm->vmm->lpg_shift))
		nouveau_vm_map(vma, nvbo->bo.mem.mm_node);

	list_add_tail(&vma->head, &nvbo->vma_list);
	vma->refcount = 1;
	return 0;
}

void
nouveau_bo_vma_del(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
{
	if (vma->node) {
		if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM)
			nouveau_vm_unmap(vma);
		nouveau_vm_put(vma);
		list_del(&vma->head);
	}
}
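
/*
 * Sketch of the per-VM mapping lifecycle these helpers implement,
 * modelled on nouveau_gem_object_open()/close() (locking and error
 * handling elided):
 *
 *	vma = nouveau_bo_vma_find(nvbo, cli->vm);
 *	if (vma) {
 *		vma->refcount++;
 *	} else {
 *		vma = kzalloc(sizeof(*vma), GFP_KERNEL);
 *		ret = nouveau_bo_vma_add(nvbo, cli->vm, vma);
 *	}
 *	...
 *	if (--vma->refcount == 0) {
 *		nouveau_bo_vma_del(nvbo, vma);
 *		kfree(vma);
 *	}
 */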