/*
 * Copyright 2007 Dave Airlied
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *          Ben Skeggs   <darktama@iinet.net.au>
 *          Jeremy Kolb  <jkolb@brandeis.edu>
 */

#include <linux/dma-mapping.h>
#include <linux/swiotlb.h>

#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"

#include "nouveau_bo.h"
#include "nouveau_ttm.h"
#include "nouveau_gem.h"

/*
 * NV10-NV40 tiling helpers
 */

static void
nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
			   u32 addr, u32 size, u32 pitch, u32 flags)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	int i = reg - drm->tile.reg;
	struct nouveau_fb *pfb = nvkm_fb(&drm->device);
	struct nouveau_fb_tile *tile = &pfb->tile.region[i];
	struct nouveau_engine *engine;

	nouveau_fence_unref(&reg->fence);

	if (tile->pitch)
		pfb->tile.fini(pfb, i, tile);

	if (pitch)
		pfb->tile.init(pfb, i, addr, size, pitch, flags, tile);

	pfb->tile.prog(pfb, i, tile);

	if ((engine = nouveau_engine(pfb, NVDEV_ENGINE_GR)))
		engine->tile_prog(engine, i);
	if ((engine = nouveau_engine(pfb, NVDEV_ENGINE_MPEG)))
		engine->tile_prog(engine, i);
}

static struct nouveau_drm_tile *
nv10_bo_get_tile_region(struct drm_device *dev, int i)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_drm_tile *tile = &drm->tile.reg[i];

	spin_lock(&drm->tile.lock);

	if (!tile->used &&
	    (!tile->fence || nouveau_fence_done(tile->fence)))
		tile->used = true;
	else
		tile = NULL;

	spin_unlock(&drm->tile.lock);
	return tile;
}

static void
nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
			struct fence *fence)
{
	struct nouveau_drm *drm = nouveau_drm(dev);

	if (tile) {
		spin_lock(&drm->tile.lock);
		tile->fence = (struct nouveau_fence *)fence_get(fence);
		tile->used = false;
		spin_unlock(&drm->tile.lock);
	}
}
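
/*
 * Pick a currently unused tile region, program it with the requested
 * address/size/pitch/flags, and disable any other unused regions that
 * still carry stale tiling state while scanning.  Returns NULL if no
 * region is free.
 */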
static struct nouveau_drm_tile *
nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
		   u32 size, u32 pitch, u32 flags)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_fb *pfb = nvkm_fb(&drm->device);
	struct nouveau_drm_tile *tile, *found = NULL;
	int i;

	for (i = 0; i < pfb->tile.regions; i++) {
		tile = nv10_bo_get_tile_region(dev, i);

		if (pitch && !found) {
			found = tile;
			continue;

		} else if (tile && pfb->tile.region[i].pitch) {
			/* Kill an unused tile region. */
			nv10_bo_update_tile_region(dev, tile, 0, 0, 0, 0);
		}

		nv10_bo_put_tile_region(dev, tile, NULL);
	}

	if (found)
		nv10_bo_update_tile_region(dev, found, addr, size,
					   pitch, flags);
	return found;
}

static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	if (unlikely(nvbo->gem.filp))
		DRM_ERROR("bo %p still attached to GEM object\n", bo);
	WARN_ON(nvbo->pin_refcnt > 0);
	nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
	kfree(nvbo);
}
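
/*
 * Adjust the requested size/alignment for the chipset: pre-Tesla tiled
 * buffers need large, chipset-specific alignment and a size rounded to
 * the tiling pitch, while newer chips only round to the VM page size.
 */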
static void
nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
		       int *align, int *size)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct nvif_device *device = &drm->device;

	if (device->info.family < NV_DEVICE_INFO_V0_TESLA) {
		if (nvbo->tile_mode) {
			if (device->info.chipset >= 0x40) {
				*align = 65536;
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (device->info.chipset >= 0x30) {
				*align = 32768;
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (device->info.chipset >= 0x20) {
				*align = 16384;
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (device->info.chipset >= 0x10) {
				*align = 16384;
				*size = roundup(*size, 32 * nvbo->tile_mode);
			}
		}
	} else {
		*size = roundup(*size, (1 << nvbo->page_shift));
		*align = max((1 << nvbo->page_shift), *align);
	}

	*size = roundup(*size, PAGE_SIZE);
}
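
/*
 * Allocate and initialise a new nouveau_bo, choosing the page size and
 * placement from the requested domain flags, then hand it to TTM for
 * backing storage.  On failure TTM calls nouveau_bo_del_ttm() itself.
 */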
int
nouveau_bo_new(struct drm_device *dev, int size, int align,
	       uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
	       struct sg_table *sg, struct reservation_object *robj,
	       struct nouveau_bo **pnvbo)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_bo *nvbo;
	size_t acc_size;
	int ret;
	int type = ttm_bo_type_device;
	int lpg_shift = 12;
	int max_size;

	if (drm->client.vm)
		lpg_shift = drm->client.vm->vmm->lpg_shift;
	max_size = INT_MAX & ~((1 << lpg_shift) - 1);

	if (size <= 0 || size > max_size) {
		NV_WARN(drm, "skipped size %x\n", (u32)size);
		return -EINVAL;
	}

	if (sg)
		type = ttm_bo_type_sg;

	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
	if (!nvbo)
		return -ENOMEM;
	INIT_LIST_HEAD(&nvbo->head);
	INIT_LIST_HEAD(&nvbo->entry);
	INIT_LIST_HEAD(&nvbo->vma_list);
	nvbo->tile_mode = tile_mode;
	nvbo->tile_flags = tile_flags;
	nvbo->bo.bdev = &drm->ttm.bdev;

	nvbo->page_shift = 12;
	if (drm->client.vm) {
		if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
			nvbo->page_shift = drm->client.vm->vmm->lpg_shift;
	}

	nouveau_bo_fixup_align(nvbo, flags, &align, &size);
	nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
	nouveau_bo_placement_set(nvbo, flags, 0);

	acc_size = ttm_bo_dma_acc_size(&drm->ttm.bdev, size,
				       sizeof(struct nouveau_bo));

	ret = ttm_bo_init(&drm->ttm.bdev, &nvbo->bo, size,
			  type, &nvbo->placement,
			  align >> PAGE_SHIFT, false, NULL, acc_size, sg,
			  robj, nouveau_bo_del_ttm);
	if (ret) {
		/* ttm will call nouveau_bo_del_ttm if it fails.. */
		return ret;
	}

	*pnvbo = nvbo;
	return 0;
}

static void
set_placement_list(struct ttm_place *pl, unsigned *n, uint32_t type, uint32_t flags)
{
	*n = 0;

	if (type & TTM_PL_FLAG_VRAM)
		pl[(*n)++].flags = TTM_PL_FLAG_VRAM | flags;
	if (type & TTM_PL_FLAG_TT)
		pl[(*n)++].flags = TTM_PL_FLAG_TT | flags;
	if (type & TTM_PL_FLAG_SYSTEM)
		pl[(*n)++].flags = TTM_PL_FLAG_SYSTEM | flags;
}
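
/*
 * On NV10-class (Celsius) boards, constrain small tiled VRAM buffers to
 * either the lower or upper half of VRAM so colour and depth buffers end
 * up on separate memory controller units.
 */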
static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	u32 vram_pages = drm->device.info.ram_size >> PAGE_SHIFT;
	unsigned i, fpfn, lpfn;

	if (drm->device.info.family == NV_DEVICE_INFO_V0_CELSIUS &&
	    nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
	    nvbo->bo.mem.num_pages < vram_pages / 4) {
		/*
		 * Make sure that the color and depth buffers are handled
		 * by independent memory controller units. Up to a 9x
		 * speed up when alpha-blending and depth-test are enabled
		 * at the same time.
		 */
		if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
			fpfn = vram_pages / 2;
			lpfn = ~0;
		} else {
			fpfn = 0;
			lpfn = vram_pages / 2;
		}
		for (i = 0; i < nvbo->placement.num_placement; ++i) {
			nvbo->placements[i].fpfn = fpfn;
			nvbo->placements[i].lpfn = lpfn;
		}
		for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
			nvbo->busy_placements[i].fpfn = fpfn;
			nvbo->busy_placements[i].lpfn = lpfn;
		}
	}
}

void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
{
	struct ttm_placement *pl = &nvbo->placement;
	uint32_t flags = TTM_PL_MASK_CACHING |
		(nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);

	pl->placement = nvbo->placements;
	set_placement_list(nvbo->placements, &pl->num_placement,
			   type, flags);

	pl->busy_placement = nvbo->busy_placements;
	set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
			   type | busy, flags);

	set_placement_range(nvbo, type);
}
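
/*
 * Pin a buffer object into the requested memory type.  The first pin
 * validates the buffer there and adjusts the VRAM/GART usage counters;
 * subsequent pins only bump the reference count.
 */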
int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	ret = ttm_bo_reserve(bo, false, false, false, NULL);
	if (ret)
		goto out;

	if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
		NV_ERROR(drm, "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
			 1 << bo->mem.mem_type, memtype);
		ret = -EINVAL;
		goto out;
	}

	if (nvbo->pin_refcnt)
		goto ref_inc;

	nouveau_bo_placement_set(nvbo, memtype, 0);

	ret = nouveau_bo_validate(nvbo, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			drm->gem.vram_available -= bo->mem.size;
			break;
		case TTM_PL_TT:
			drm->gem.gart_available -= bo->mem.size;
			break;
		default:
			break;
		}
	}

ref_inc:
	nvbo->pin_refcnt++;

out:
	ttm_bo_unreserve(bo);
	return ret;
}
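
/*
 * Drop a pin reference.  When the last reference goes away the buffer is
 * re-validated without the pinned placement and the VRAM/GART usage
 * counters are given back.
 */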
int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret, ref;

	ret = ttm_bo_reserve(bo, false, false, false, NULL);
	if (ret)
		return ret;

	ref = --nvbo->pin_refcnt;
	WARN_ON_ONCE(ref < 0);
	if (ref)
		goto out;

	nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);

	ret = nouveau_bo_validate(nvbo, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			drm->gem.vram_available += bo->mem.size;
			break;
		case TTM_PL_TT:
			drm->gem.gart_available += bo->mem.size;
			break;
		default:
			break;
		}
	}

out:
	ttm_bo_unreserve(bo);
	return ret;
}

int
nouveau_bo_map(struct nouveau_bo *nvbo)
{
	int ret;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL);
	if (ret)
		return ret;

	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}

void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
	if (nvbo)
		ttm_bo_kunmap(&nvbo->kmap);
}

int
nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
		    bool no_wait_gpu)
{
	int ret;

	ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
			      interruptible, no_wait_gpu);
	if (ret)
		return ret;

	return 0;
}
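
/*
 * Helpers for accessing a kmapped buffer: the mapping may be either
 * ordinary kernel memory or I/O memory, so reads and writes go through
 * ioreadXX/iowriteXX when needed.
 */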
u16
nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		return ioread16_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		iowrite16_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

u32
nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		return ioread32_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		iowrite32_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}
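
/*
 * Create the TTM backend for a buffer: AGP-backed when an AGP bridge is
 * enabled, otherwise the driver's own sgdma backend.
 */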
static struct ttm_tt *
nouveau_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
		      uint32_t page_flags, struct page *dummy_read)
{
#if __OS_HAS_AGP
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct drm_device *dev = drm->dev;

	if (drm->agp.stat == ENABLED) {
		return ttm_agp_tt_create(bdev, dev->agp->bridge, size,
					 page_flags, dummy_read);
	}
#endif

	return nouveau_sgdma_create_ttm(bdev, size, page_flags, dummy_read);
}

static int
nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	/* We'll do this from user space. */
	return 0;
}

static int
nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			 struct ttm_mem_type_manager *man)
{
	struct nouveau_drm *drm = nouveau_bdev(bdev);

	switch (type) {
	case TTM_PL_SYSTEM:
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED |
					 TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;

		if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
			/* Some BARs do not support being ioremapped WC */
			if (nvkm_bar(&drm->device)->iomap_uncached) {
				man->available_caching = TTM_PL_FLAG_UNCACHED;
				man->default_caching = TTM_PL_FLAG_UNCACHED;
			}

			man->func = &nouveau_vram_manager;
			man->io_reserve_fastpath = false;
			man->use_io_reserve_lru = true;
		} else {
			man->func = &ttm_bo_manager_func;
		}
		break;
	case TTM_PL_TT:
		if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
			man->func = &nouveau_gart_manager;
		else
		if (drm->agp.stat != ENABLED)
			man->func = &nv04_gart_manager;
		else
			man->func = &ttm_bo_manager_func;

		if (drm->agp.stat == ENABLED) {
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
			man->available_caching = TTM_PL_FLAG_UNCACHED |
						 TTM_PL_FLAG_WC;
			man->default_caching = TTM_PL_FLAG_WC;
		} else {
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
				     TTM_MEMTYPE_FLAG_CMA;
			man->available_caching = TTM_PL_MASK_CACHING;
			man->default_caching = TTM_PL_FLAG_CACHED;
		}

		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
					 TTM_PL_FLAG_SYSTEM);
		break;
	default:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
		break;
	}

	*pl = nvbo->placement;
}
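
/*
 * Per-generation hardware copy routines used to migrate buffer contents.
 * Each *_bo_move_init() binds the copy class to the channel, and each
 * *_bo_move_copy(), *_bo_move_m2mf() and *_bo_move_exec() emits the
 * commands for one buffer move on that generation's engine.
 */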
static int
nve0_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 2);
	if (ret == 0) {
		BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
		OUT_RING (chan, handle & 0x0000ffff);
		FIRE_RING (chan);
	}
	return ret;
}

static int
nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	int ret = RING_SPACE(chan, 10);
	if (ret == 0) {
		BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8);
		OUT_RING (chan, upper_32_bits(node->vma[0].offset));
		OUT_RING (chan, lower_32_bits(node->vma[0].offset));
		OUT_RING (chan, upper_32_bits(node->vma[1].offset));
		OUT_RING (chan, lower_32_bits(node->vma[1].offset));
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, new_mem->num_pages);
		BEGIN_IMC0(chan, NvSubCopy, 0x0300, 0x0386);
	}
	return ret;
}

static int
nvc0_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 2);
	if (ret == 0) {
		BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
		OUT_RING (chan, handle);
	}
	return ret;
}

static int
nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	u32 page_count = new_mem->num_pages;
	int ret;

	page_count = new_mem->num_pages;
	while (page_count) {
		int line_count = (page_count > 8191) ? 8191 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_NVC0(chan, NvSubCopy, 0x030c, 8);
		OUT_RING (chan, upper_32_bits(src_offset));
		OUT_RING (chan, lower_32_bits(src_offset));
		OUT_RING (chan, upper_32_bits(dst_offset));
		OUT_RING (chan, lower_32_bits(dst_offset));
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, line_count);
		BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
		OUT_RING (chan, 0x00000110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

static int
nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	u32 page_count = new_mem->num_pages;
	int ret;

	page_count = new_mem->num_pages;
	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 12);
		if (ret)
			return ret;

		BEGIN_NVC0(chan, NvSubCopy, 0x0238, 2);
		OUT_RING (chan, upper_32_bits(dst_offset));
		OUT_RING (chan, lower_32_bits(dst_offset));
		BEGIN_NVC0(chan, NvSubCopy, 0x030c, 6);
		OUT_RING (chan, upper_32_bits(src_offset));
		OUT_RING (chan, lower_32_bits(src_offset));
		OUT_RING (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING (chan, PAGE_SIZE); /* line_length */
		OUT_RING (chan, line_count);
		BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
		OUT_RING (chan, 0x00100110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

static int
nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	u32 page_count = new_mem->num_pages;
	int ret;

	page_count = new_mem->num_pages;
	while (page_count) {
		int line_count = (page_count > 8191) ? 8191 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
		OUT_RING (chan, upper_32_bits(src_offset));
		OUT_RING (chan, lower_32_bits(src_offset));
		OUT_RING (chan, upper_32_bits(dst_offset));
		OUT_RING (chan, lower_32_bits(dst_offset));
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, line_count);
		BEGIN_NV04(chan, NvSubCopy, 0x0300, 1);
		OUT_RING (chan, 0x00000110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

static int
nv98_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	int ret = RING_SPACE(chan, 7);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0320, 6);
		OUT_RING (chan, upper_32_bits(node->vma[0].offset));
		OUT_RING (chan, lower_32_bits(node->vma[0].offset));
		OUT_RING (chan, upper_32_bits(node->vma[1].offset));
		OUT_RING (chan, lower_32_bits(node->vma[1].offset));
		OUT_RING (chan, 0x00000000 /* COPY */);
		OUT_RING (chan, new_mem->num_pages << PAGE_SHIFT);
	}
	return ret;
}

static int
nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	int ret = RING_SPACE(chan, 7);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0304, 6);
		OUT_RING (chan, new_mem->num_pages << PAGE_SHIFT);
		OUT_RING (chan, upper_32_bits(node->vma[0].offset));
		OUT_RING (chan, lower_32_bits(node->vma[0].offset));
		OUT_RING (chan, upper_32_bits(node->vma[1].offset));
		OUT_RING (chan, lower_32_bits(node->vma[1].offset));
		OUT_RING (chan, 0x00000000 /* MODE_COPY, QUERY_NONE */);
	}
	return ret;
}

static int
nv50_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 6);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
		OUT_RING (chan, handle);
		BEGIN_NV04(chan, NvSubCopy, 0x0180, 3);
		OUT_RING (chan, chan->drm->ntfy.handle);
		OUT_RING (chan, chan->vram.handle);
		OUT_RING (chan, chan->vram.handle);
	}

	return ret;
}

static int
nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	u64 length = (new_mem->num_pages << PAGE_SHIFT);
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	int src_tiled = !!node->memtype;
	int dst_tiled = !!((struct nouveau_mem *)new_mem->mm_node)->memtype;
	int ret;

	while (length) {
		u32 amount, stride, height;

		ret = RING_SPACE(chan, 18 + 6 * (src_tiled + dst_tiled));
		if (ret)
			return ret;

		amount = min(length, (u64)(4 * 1024 * 1024));
		stride = 16 * 4;
		height = amount / stride;

		if (src_tiled) {
			BEGIN_NV04(chan, NvSubCopy, 0x0200, 7);
			OUT_RING (chan, 0);
			OUT_RING (chan, 0);
			OUT_RING (chan, stride);
			OUT_RING (chan, height);
			OUT_RING (chan, 1);
			OUT_RING (chan, 0);
			OUT_RING (chan, 0);
		} else {
			BEGIN_NV04(chan, NvSubCopy, 0x0200, 1);
			OUT_RING (chan, 1);
		}
		if (dst_tiled) {
			BEGIN_NV04(chan, NvSubCopy, 0x021c, 7);
			OUT_RING (chan, 0);
			OUT_RING (chan, 0);
			OUT_RING (chan, stride);
			OUT_RING (chan, height);
			OUT_RING (chan, 1);
			OUT_RING (chan, 0);
			OUT_RING (chan, 0);
		} else {
			BEGIN_NV04(chan, NvSubCopy, 0x021c, 1);
			OUT_RING (chan, 1);
		}

		BEGIN_NV04(chan, NvSubCopy, 0x0238, 2);
		OUT_RING (chan, upper_32_bits(src_offset));
		OUT_RING (chan, upper_32_bits(dst_offset));
		BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
		OUT_RING (chan, lower_32_bits(src_offset));
		OUT_RING (chan, lower_32_bits(dst_offset));
		OUT_RING (chan, stride);
		OUT_RING (chan, stride);
		OUT_RING (chan, stride);
		OUT_RING (chan, height);
		OUT_RING (chan, 0x00000101);
		OUT_RING (chan, 0x00000000);
		BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING (chan, 0);

		length -= amount;
		src_offset += amount;
		dst_offset += amount;
	}

	return 0;
}

static int
nv04_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 4);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
		OUT_RING (chan, handle);
		BEGIN_NV04(chan, NvSubCopy, 0x0180, 1);
		OUT_RING (chan, chan->drm->ntfy.handle);
	}

	return ret;
}
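
/*
 * Pick the DMA object that covers a memory region for the NV04 M2MF
 * path: the GART object for TT placements, the channel's VRAM object
 * otherwise.
 */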
static inline uint32_t
nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
		      struct nouveau_channel *chan, struct ttm_mem_reg *mem)
{
	if (mem->mem_type == TTM_PL_TT)
		return NvDmaTT;
	return chan->vram.handle;
}

static int
nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	u32 src_offset = old_mem->start << PAGE_SHIFT;
	u32 dst_offset = new_mem->start << PAGE_SHIFT;
	u32 page_count = new_mem->num_pages;
	int ret;

	ret = RING_SPACE(chan, 3);
	if (ret)
		return ret;

	BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
	OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
	OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));

	page_count = new_mem->num_pages;
	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_NV04(chan, NvSubCopy,
			   NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
		OUT_RING (chan, src_offset);
		OUT_RING (chan, dst_offset);
		OUT_RING (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING (chan, PAGE_SIZE); /* line_length */
		OUT_RING (chan, line_count);
		OUT_RING (chan, 0x00000101);
		OUT_RING (chan, 0x00000000);
		BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING (chan, 0);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}
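
/*
 * Map both the source and destination memory nodes into the client's VM
 * so the copy engine can address them; the temporary VMAs are attached
 * to the old node and torn down once TTM destroys the ttm_mem_reg.
 */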
static int
nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo,
		     struct ttm_mem_reg *mem)
{
	struct nouveau_mem *old_node = bo->mem.mm_node;
	struct nouveau_mem *new_node = mem->mm_node;
	u64 size = (u64)mem->num_pages << PAGE_SHIFT;
	int ret;

	ret = nouveau_vm_get(drm->client.vm, size, old_node->page_shift,
			     NV_MEM_ACCESS_RW, &old_node->vma[0]);
	if (ret)
		return ret;

	ret = nouveau_vm_get(drm->client.vm, size, new_node->page_shift,
			     NV_MEM_ACCESS_RW, &old_node->vma[1]);
	if (ret) {
		nouveau_vm_put(&old_node->vma[0]);
		return ret;
	}

	nouveau_vm_map(&old_node->vma[0], old_node);
	nouveau_vm_map(&old_node->vma[1], new_node);
	return 0;
}
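
/*
 * Move a buffer using the channel's copy engine: set up the transfer
 * VMAs on Tesla and later, take the client mutex, synchronise with any
 * previous work, then emit the copy and fence it so TTM can clean up
 * once the GPU is done.
 */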
static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
		     bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_channel *chan = drm->ttm.chan;
	struct nouveau_cli *cli = (void *)nvif_client(&chan->device->base);
	struct nouveau_fence *fence;
	int ret;

	/* create temporary vmas for the transfer and attach them to the
	 * old nouveau_mem node, these will get cleaned up after ttm has
	 * destroyed the ttm_mem_reg
	 */
	if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
		ret = nouveau_bo_move_prep(drm, bo, new_mem);
		if (ret)
			return ret;
	}

	mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING);
	ret = nouveau_fence_sync(nouveau_bo(bo), chan, true, intr);
	if (ret == 0) {
		ret = drm->ttm.move(chan, bo, &bo->mem, new_mem);
		if (ret == 0) {
			ret = nouveau_fence_new(chan, false, &fence);
			if (ret == 0) {
				ret = ttm_bo_move_accel_cleanup(bo,
								&fence->base,
								evict,
								no_wait_gpu,
								new_mem);
				nouveau_fence_unref(&fence);
			}
		}
	}
	mutex_unlock(&cli->mutex);
	return ret;
}
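
/*
 * Probe which hardware copy mechanism is available and hook it up as
 * drm->ttm.move.  The method table is ordered from newest to oldest;
 * the first class that can be allocated and initialised on a channel
 * wins, otherwise buffer moves fall back to the CPU.
 */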
void
nouveau_bo_move_init(struct nouveau_drm *drm)
{
	static const struct {
		const char *name;
		int engine;
		u32 oclass;
		int (*exec)(struct nouveau_channel *,
			    struct ttm_buffer_object *,
			    struct ttm_mem_reg *, struct ttm_mem_reg *);
		int (*init)(struct nouveau_channel *, u32 handle);
	} _methods[] = {
		{  "COPY", 4, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{ "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init },
		{ "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 0, 0x85b5, nva3_bo_move_copy, nv50_bo_move_init },
		{ "CRYPT", 0, 0x74c1, nv84_bo_move_exec, nv50_bo_move_init },
		{  "M2MF", 0, 0x9039, nvc0_bo_move_m2mf, nvc0_bo_move_init },
		{  "M2MF", 0, 0x5039, nv50_bo_move_m2mf, nv50_bo_move_init },
		{  "M2MF", 0, 0x0039, nv04_bo_move_m2mf, nv04_bo_move_init },
		{},
		{ "CRYPT", 0, 0x88b4, nv98_bo_move_exec, nv50_bo_move_init },
	}, *mthd = _methods;
	const char *name = "CPU";
	int ret;

	do {
		struct nouveau_channel *chan;

		if (mthd->engine)
			chan = drm->cechan;
		else
			chan = drm->channel;
		if (chan == NULL)
			continue;

		ret = nvif_object_init(chan->object, NULL,
				       mthd->oclass | (mthd->engine << 16),
				       mthd->oclass, NULL, 0,
				       &drm->ttm.copy);
		if (ret == 0) {
			ret = mthd->init(chan, drm->ttm.copy.handle);
			if (ret) {
				nvif_object_fini(&drm->ttm.copy);
				continue;
			}

			drm->ttm.move = mthd->exec;
			drm->ttm.chan = chan;
			name = mthd->name;
			break;
		}
	} while ((++mthd)->exec);

	NV_INFO(drm, "MM: using %s for buffer copies\n", name);
}
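
/*
 * VRAM <-> SYSTEM moves cannot be done by the copy engine directly, so
 * they are bounced through a temporary GART placement: flipd copies to
 * GART and then flips into the final placement, flips does the reverse.
 */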
static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct ttm_place placement_memtype = {
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING
	};
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_tt_bind(bo->ttm, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, &tmp_mem);
	if (ret)
		goto out;

	ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem);
out:
	ttm_bo_mem_put(bo, &tmp_mem);
	return ret;
}

static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct ttm_place placement_memtype = {
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING
	};
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, new_mem);
	if (ret)
		goto out;

out:
	ttm_bo_mem_put(bo, &tmp_mem);
	return ret;
}
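
/*
 * Keep the buffer's VM mappings in sync with its new backing storage:
 * re-map each VMA when the new placement can be mapped through it, and
 * unmap it otherwise.
 */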
static void
nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_vma *vma;

	/* ttm can now (stupidly) pass the driver bos it didn't create... */
	if (bo->destroy != nouveau_bo_del_ttm)
		return;

	list_for_each_entry(vma, &nvbo->vma_list, head) {
		if (new_mem && new_mem->mem_type != TTM_PL_SYSTEM &&
		    (new_mem->mem_type == TTM_PL_VRAM ||
		     nvbo->page_shift != vma->vm->vmm->lpg_shift)) {
			nouveau_vm_map(vma, new_mem->mm_node);
		} else {
			nouveau_vm_unmap(vma);
		}
	}
}

static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
		   struct nouveau_drm_tile **new_tile)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	u64 offset = new_mem->start << PAGE_SHIFT;

	*new_tile = NULL;
	if (new_mem->mem_type != TTM_PL_VRAM)
		return 0;

	if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
		*new_tile = nv10_bo_set_tiling(dev, offset, new_mem->size,
					       nvbo->tile_mode,
					       nvbo->tile_flags);
	}

	return 0;
}

static void
nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
		      struct nouveau_drm_tile *new_tile,
		      struct nouveau_drm_tile **old_tile)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct fence *fence = reservation_object_get_excl(bo->resv);

	nv10_bo_put_tile_region(dev, *old_tile, fence);
	*old_tile = new_tile;
}
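
/*
 * Top-level TTM move callback: set up tiling state on pre-Tesla chips,
 * try the hardware-assisted copy paths first, and fall back to a CPU
 * memcpy move when no copy engine is available or the copy fails.
 */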
static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
		bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct nouveau_drm_tile *new_tile = NULL;
	int ret = 0;

	if (nvbo->pin_refcnt)
		NV_WARN(drm, "Moving pinned object %p!\n", nvbo);

	if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
		ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
		if (ret)
			return ret;
	}

	/* Fake bo copy. */
	if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
		BUG_ON(bo->mem.mm_node != NULL);
		bo->mem = *new_mem;
		new_mem->mm_node = NULL;
		goto out;
	}

	/* Hardware assisted copy. */
	if (drm->ttm.move) {
		if (new_mem->mem_type == TTM_PL_SYSTEM)
			ret = nouveau_bo_move_flipd(bo, evict, intr,
						    no_wait_gpu, new_mem);
		else if (old_mem->mem_type == TTM_PL_SYSTEM)
			ret = nouveau_bo_move_flips(bo, evict, intr,
						    no_wait_gpu, new_mem);
		else
			ret = nouveau_bo_move_m2mf(bo, evict, intr,
						   no_wait_gpu, new_mem);
		if (!ret)
			goto out;
	}

	/* Fallback to software copy. */
	ret = ttm_bo_wait(bo, true, intr, no_wait_gpu);
	if (ret == 0)
		ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);

out:
	if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
		if (ret)
			nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
		else
			nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
	}

	return ret;
}

static int
nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	return drm_vma_node_verify_access(&nvbo->gem.vma_node, filp);
}

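/*
 * Describe to TTM how CPU access to a memory region is set up: plain
 * system memory needs nothing, AGP-backed TT memory goes through the AGP
 * aperture, and VRAM (or tiled memory on Tesla and later) goes through
 * PCI resource 1, with an additional BAR mapping created via bar->umap()
 * on Tesla+.  nouveau_ttm_io_mem_free() below undoes that BAR mapping.
 */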
static int
nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct nouveau_mem *node = mem->mm_node;
	int ret;

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		return 0;
	case TTM_PL_TT:
#if __OS_HAS_AGP
		if (drm->agp.stat == ENABLED) {
			mem->bus.offset = mem->start << PAGE_SHIFT;
			mem->bus.base = drm->agp.base;
			mem->bus.is_iomem = !drm->dev->agp->cant_use_aperture;
		}
#endif
		if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA || !node->memtype)
			/* untiled */
			break;
		/* fallthrough, tiled memory */
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		mem->bus.base = nv_device_resource_start(nvkm_device(&drm->device), 1);
		mem->bus.is_iomem = true;
		if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
			struct nouveau_bar *bar = nvkm_bar(&drm->device);

			ret = bar->umap(bar, node, NV_MEM_ACCESS_RW,
					&node->bar_vma);
			if (ret)
				return ret;

			mem->bus.offset = node->bar_vma.offset;
		}
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct nouveau_bar *bar = nvkm_bar(&drm->device);
	struct nouveau_mem *node = mem->mm_node;

	if (!node->bar_vma.node)
		return;

	bar->unmap(bar, &node->bar_vma);
}

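/*
 * Called before a CPU fault is serviced.  Tiled buffers that currently
 * live outside VRAM are first migrated to TT, and on pre-Tesla boards a
 * VRAM buffer that lies beyond the CPU-mappable window is restricted to
 * mappable VRAM and revalidated so the fault can be satisfied.
 */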
static int
nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nvif_device *device = &drm->device;
	u32 mappable = nv_device_resource_len(nvkm_device(device), 1) >> PAGE_SHIFT;
	int i, ret;

	/* as long as the bo isn't in vram, and isn't tiled, we've got
	 * nothing to do here.
	 */
	if (bo->mem.mem_type != TTM_PL_VRAM) {
		if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA ||
		    !nouveau_bo_tile_layout(nvbo))
			return 0;

		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
			nouveau_bo_placement_set(nvbo, TTM_PL_TT, 0);

			ret = nouveau_bo_validate(nvbo, false, false);
			if (ret)
				return ret;
		}
		return 0;
	}

	/* make sure bo is in mappable vram */
	if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA ||
	    bo->mem.start + bo->mem.num_pages < mappable)
		return 0;

	for (i = 0; i < nvbo->placement.num_placement; ++i) {
		nvbo->placements[i].fpfn = 0;
		nvbo->placements[i].lpfn = mappable;
	}

	for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
		nvbo->busy_placements[i].fpfn = 0;
		nvbo->busy_placements[i].lpfn = mappable;
	}

	nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
	return nouveau_bo_validate(nvbo, false, false);
}

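/*
 * Allocate and DMA-map the backing pages of a ttm_tt.  Imported (SG)
 * objects only need their page/dma_address arrays filled from the sg
 * table; otherwise the AGP or swiotlb-aware pools are used when
 * applicable, falling back to the generic pool plus a per-page
 * dma_map_page().  nouveau_ttm_tt_unpopulate() below reverses this.
 */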
static int
nouveau_ttm_tt_populate(struct ttm_tt *ttm)
{
	struct ttm_dma_tt *ttm_dma = (void *)ttm;
	struct nouveau_drm *drm;
	struct nouveau_device *device;
	struct drm_device *dev;
	struct device *pdev;
	unsigned i;
	int r;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (ttm->state != tt_unpopulated)
		return 0;

	if (slave && ttm->sg) {
		/* make userspace faulting work */
		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
						 ttm_dma->dma_address, ttm->num_pages);
		ttm->state = tt_unbound;
		return 0;
	}

	drm = nouveau_bdev(ttm->bdev);
	device = nvkm_device(&drm->device);
	dev = drm->dev;
	pdev = nv_device_base(device);

#if __OS_HAS_AGP
	if (drm->agp.stat == ENABLED) {
		return ttm_agp_tt_populate(ttm);
	}
#endif

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		return ttm_dma_populate((void *)ttm, dev->dev);
	}
#endif

	r = ttm_pool_populate(ttm);
	if (r) {
		return r;
	}

	for (i = 0; i < ttm->num_pages; i++) {
		dma_addr_t addr;

		addr = dma_map_page(pdev, ttm->pages[i], 0, PAGE_SIZE,
				    DMA_BIDIRECTIONAL);

		if (dma_mapping_error(pdev, addr)) {
			/* unwind every page mapped so far, including page 0 */
			while (i--) {
				dma_unmap_page(pdev, ttm_dma->dma_address[i],
					       PAGE_SIZE, DMA_BIDIRECTIONAL);
				ttm_dma->dma_address[i] = 0;
			}
			ttm_pool_unpopulate(ttm);
			return -EFAULT;
		}

		ttm_dma->dma_address[i] = addr;
	}
	return 0;
}

static void
nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	struct ttm_dma_tt *ttm_dma = (void *)ttm;
	struct nouveau_drm *drm;
	struct nouveau_device *device;
	struct drm_device *dev;
	struct device *pdev;
	unsigned i;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (slave)
		return;

	drm = nouveau_bdev(ttm->bdev);
	device = nvkm_device(&drm->device);
	dev = drm->dev;
	pdev = nv_device_base(device);

#if __OS_HAS_AGP
	if (drm->agp.stat == ENABLED) {
		ttm_agp_tt_unpopulate(ttm);
		return;
	}
#endif

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		ttm_dma_unpopulate((void *)ttm, dev->dev);
		return;
	}
#endif

	for (i = 0; i < ttm->num_pages; i++) {
		if (ttm_dma->dma_address[i]) {
			dma_unmap_page(pdev, ttm_dma->dma_address[i], PAGE_SIZE,
				       DMA_BIDIRECTIONAL);
		}
	}

	ttm_pool_unpopulate(ttm);
}

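/*
 * Record @fence against the bo's reservation object: writers take the
 * exclusive slot, readers are added as shared fences when @fence is
 * non-NULL.
 */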
void
nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence, bool exclusive)
{
	struct reservation_object *resv = nvbo->bo.resv;

	if (exclusive)
		reservation_object_add_excl_fence(resv, &fence->base);
	else if (fence)
		reservation_object_add_shared_fence(resv, &fence->base);
}
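
/*
 * Illustrative sketch, not part of the original file: how a submission
 * path might publish one freshly emitted fence on every buffer it
 * touched, using the exclusive slot for written buffers and a shared
 * slot for read-only ones.  "example_bo_ref" and the "write" flag are
 * hypothetical stand-ins for the caller's own bookkeeping.
 */
struct example_bo_ref {
	struct nouveau_bo *nvbo;
	bool write;
};

static void
example_fence_objects(struct example_bo_ref *refs, unsigned nr,
		      struct nouveau_fence *fence)
{
	unsigned i;

	for (i = 0; i < nr; i++)
		nouveau_bo_fence(refs[i].nvbo, fence, refs[i].write);
}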

struct ttm_bo_driver nouveau_bo_driver = {
	.ttm_tt_create = &nouveau_ttm_tt_create,
	.ttm_tt_populate = &nouveau_ttm_tt_populate,
	.ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
	.invalidate_caches = nouveau_bo_invalidate_caches,
	.init_mem_type = nouveau_bo_init_mem_type,
	.evict_flags = nouveau_bo_evict_flags,
	.move_notify = nouveau_bo_move_ntfy,
	.move = nouveau_bo_move,
	.verify_access = nouveau_bo_verify_access,
	.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
	.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
	.io_mem_free = &nouveau_ttm_io_mem_free,
};

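/*
 * A bo can be mapped into any number of GPU address spaces; each mapping
 * is tracked as a struct nouveau_vma on the bo's vma_list.  The helpers
 * below look such mappings up, instantiate them and tear them down.
 */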
struct nouveau_vma *
nouveau_bo_vma_find(struct nouveau_bo *nvbo, struct nouveau_vm *vm)
{
	struct nouveau_vma *vma;
	list_for_each_entry(vma, &nvbo->vma_list, head) {
		if (vma->vm == vm)
			return vma;
	}

	return NULL;
}

int
nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm,
		   struct nouveau_vma *vma)
{
	const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
	int ret;

	ret = nouveau_vm_get(vm, size, nvbo->page_shift,
			     NV_MEM_ACCESS_RW, vma);
	if (ret)
		return ret;

	if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM &&
	    (nvbo->bo.mem.mem_type == TTM_PL_VRAM ||
	     nvbo->page_shift != vma->vm->vmm->lpg_shift))
		nouveau_vm_map(vma, nvbo->bo.mem.mm_node);

	list_add_tail(&vma->head, &nvbo->vma_list);
	vma->refcount = 1;
	return 0;
}

void
nouveau_bo_vma_del(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
{
	if (vma->node) {
		if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM)
			nouveau_vm_unmap(vma);
		nouveau_vm_put(vma);
		list_del(&vma->head);
	}
}
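
/*
 * Illustrative sketch, not part of the original file: the typical
 * lookup-then-instantiate pattern a caller of the helpers above might
 * use when a client first touches a bo in its address space.  The
 * function name is hypothetical and <linux/slab.h> is assumed to be
 * available for kzalloc()/kfree().
 */
static int
example_bo_map_into_vm(struct nouveau_bo *nvbo, struct nouveau_vm *vm)
{
	struct nouveau_vma *vma;
	int ret;

	/* reuse an existing mapping for this address space if there is one */
	vma = nouveau_bo_vma_find(nvbo, vm);
	if (vma) {
		vma->refcount++;
		return 0;
	}

	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	if (!vma)
		return -ENOMEM;

	/* allocate virtual address space and map the backing pages */
	ret = nouveau_bo_vma_add(nvbo, vm, vma);
	if (ret)
		kfree(vma);
	return ret;
}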