/*
 * Copyright 2007 Dave Airlied
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *	    Ben Skeggs   <darktama@iinet.net.au>
 *	    Jeremy Kolb  <jkolb@brandeis.edu>
 */

#include <linux/dma-mapping.h>
#include <linux/swiotlb.h>

#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"

#include "nouveau_bo.h"
#include "nouveau_ttm.h"
#include "nouveau_gem.h"
#include "nouveau_mem.h"
#include "nouveau_vmm.h"

/*
 * NV10-NV40 tiling helpers
 */

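/* Reprogram one hardware tile region: drop the fence guarding the old
 * configuration, tear the region down if it was programmed, and write
 * the new addr/size/pitch setup to the chip when a pitch is given.
 */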
static void
nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
			   u32 addr, u32 size, u32 pitch, u32 flags)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	int i = reg - drm->tile.reg;
	struct nvkm_device *device = nvxx_device(&drm->client.device);
	struct nvkm_fb *fb = device->fb;
	struct nvkm_fb_tile *tile = &fb->tile.region[i];

	nouveau_fence_unref(&reg->fence);

	if (tile->pitch)
		nvkm_fb_tile_fini(fb, i, tile);

	if (pitch)
		nvkm_fb_tile_init(fb, i, addr, size, pitch, flags, tile);

	nvkm_fb_tile_prog(fb, i, tile);
}

static struct nouveau_drm_tile *
nv10_bo_get_tile_region(struct drm_device *dev, int i)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_drm_tile *tile = &drm->tile.reg[i];

	spin_lock(&drm->tile.lock);

	if (!tile->used &&
	    (!tile->fence || nouveau_fence_done(tile->fence)))
		tile->used = true;
	else
		tile = NULL;

	spin_unlock(&drm->tile.lock);
	return tile;
}

static void
nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
			struct dma_fence *fence)
{
	struct nouveau_drm *drm = nouveau_drm(dev);

	if (tile) {
		spin_lock(&drm->tile.lock);
		tile->fence = (struct nouveau_fence *)dma_fence_get(fence);
		tile->used = false;
		spin_unlock(&drm->tile.lock);
	}
}

static struct nouveau_drm_tile *
nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
		   u32 size, u32 pitch, u32 zeta)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
	struct nouveau_drm_tile *tile, *found = NULL;
	int i;

	for (i = 0; i < fb->tile.regions; i++) {
		tile = nv10_bo_get_tile_region(dev, i);

		if (pitch && !found) {
			found = tile;
			continue;

		} else if (tile && fb->tile.region[i].pitch) {
			/* Kill an unused tile region. */
			nv10_bo_update_tile_region(dev, tile, 0, 0, 0, 0);
		}

		nv10_bo_put_tile_region(dev, tile, NULL);
	}

	if (found)
		nv10_bo_update_tile_region(dev, found, addr, size, pitch, zeta);
	return found;
}

static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	if (unlikely(nvbo->gem.filp))
		DRM_ERROR("bo %p still attached to GEM object\n", bo);
	WARN_ON(nvbo->pin_refcnt > 0);
	nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
	kfree(nvbo);
}

static inline u64
roundup_64(u64 x, u32 y)
{
	x += y - 1;
	do_div(x, y);
	return x * y;
}

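/* Round the requested size and alignment up to the constraints imposed
 * by the buffer's tiling mode: pre-Tesla chipsets pad the size to a
 * multiple of the tile pitch, newer chipsets only to the GPU page size
 * selected for the buffer.
 */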
static void
nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
		       int *align, u64 *size)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct nvif_device *device = &drm->client.device;

	if (device->info.family < NV_DEVICE_INFO_V0_TESLA) {
		if (nvbo->mode) {
			if (device->info.chipset >= 0x40) {
				*align = 65536;
				*size = roundup_64(*size, 64 * nvbo->mode);

			} else if (device->info.chipset >= 0x30) {
				*align = 32768;
				*size = roundup_64(*size, 64 * nvbo->mode);

			} else if (device->info.chipset >= 0x20) {
				*align = 16384;
				*size = roundup_64(*size, 64 * nvbo->mode);

			} else if (device->info.chipset >= 0x10) {
				*align = 16384;
				*size = roundup_64(*size, 32 * nvbo->mode);
			}
		}
	} else {
		*size = roundup_64(*size, (1 << nvbo->page));
		*align = max((1 << nvbo->page), *align);
	}

	*size = roundup_64(*size, PAGE_SIZE);
}

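/* Allocate a new nouveau_bo: decode the tile_mode/tile_flags from the
 * GEM ABI into kind/comp/zeta state, pick a backing page size, and hand
 * the object to TTM to allocate the actual storage.
 */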
int
nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
	       uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
	       struct sg_table *sg, struct reservation_object *robj,
	       struct nouveau_bo **pnvbo)
{
	struct nouveau_drm *drm = cli->drm;
	struct nouveau_bo *nvbo;
	size_t acc_size;
	int ret;
	int type = ttm_bo_type_device;

	if (!size) {
		NV_WARN(drm, "skipped size %016llx\n", size);
		return -EINVAL;
	}

	if (sg)
		type = ttm_bo_type_sg;

	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
	if (!nvbo)
		return -ENOMEM;
	INIT_LIST_HEAD(&nvbo->head);
	INIT_LIST_HEAD(&nvbo->entry);
	INIT_LIST_HEAD(&nvbo->vma_list);
	nvbo->bo.bdev = &drm->ttm.bdev;
	nvbo->cli = cli;

	if (!nvxx_device(&drm->client.device)->func->cpu_coherent)
		nvbo->force_coherent = flags & TTM_PL_FLAG_UNCACHED;

	if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI) {
		nvbo->kind = (tile_flags & 0x0000ff00) >> 8;
		nvbo->comp = gf100_pte_storage_type_map[nvbo->kind] != nvbo->kind;
	} else
	if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
		nvbo->kind = (tile_flags & 0x00007f00) >> 8;
		nvbo->comp = (tile_flags & 0x00030000) >> 16;
	} else {
		nvbo->zeta = (tile_flags & 0x00000007);
	}
	nvbo->mode = tile_mode;
	nvbo->contig = !(tile_flags & NOUVEAU_GEM_TILE_NONCONTIG);

	nvbo->page = 12;
	if (drm->client.vm) {
		if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
			nvbo->page = drm->client.vm->mmu->lpg_shift;
		else {
			if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI)
				nvbo->kind = gf100_pte_storage_type_map[nvbo->kind];
			nvbo->comp = 0;
		}
	}

	nouveau_bo_fixup_align(nvbo, flags, &align, &size);
	nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
	nouveau_bo_placement_set(nvbo, flags, 0);

	acc_size = ttm_bo_dma_acc_size(&drm->ttm.bdev, size,
				       sizeof(struct nouveau_bo));

	ret = ttm_bo_init(&drm->ttm.bdev, &nvbo->bo, size,
			  type, &nvbo->placement,
			  align >> PAGE_SHIFT, false, NULL, acc_size, sg,
			  robj, nouveau_bo_del_ttm);
	if (ret) {
		/* ttm will call nouveau_bo_del_ttm if it fails.. */
		return ret;
	}

	*pnvbo = nvbo;
	return 0;
}

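/* Placement helpers: translate TTM_PL_* domain masks into the placement
 * lists TTM consumes, including the NV10 (Celsius) trick of splitting
 * VRAM so colour and depth buffers land on different memory controller
 * units.
 */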
static void
set_placement_list(struct ttm_place *pl, unsigned *n, uint32_t type, uint32_t flags)
{
	*n = 0;

	if (type & TTM_PL_FLAG_VRAM)
		pl[(*n)++].flags = TTM_PL_FLAG_VRAM | flags;
	if (type & TTM_PL_FLAG_TT)
		pl[(*n)++].flags = TTM_PL_FLAG_TT | flags;
	if (type & TTM_PL_FLAG_SYSTEM)
		pl[(*n)++].flags = TTM_PL_FLAG_SYSTEM | flags;
}

static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	u32 vram_pages = drm->client.device.info.ram_size >> PAGE_SHIFT;
	unsigned i, fpfn, lpfn;

	if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CELSIUS &&
	    nvbo->mode && (type & TTM_PL_FLAG_VRAM) &&
	    nvbo->bo.mem.num_pages < vram_pages / 4) {
		/*
		 * Make sure that the color and depth buffers are handled
		 * by independent memory controller units. Up to a 9x
		 * speed up when alpha-blending and depth-test are enabled
		 * at the same time.
		 */
		if (nvbo->zeta) {
			fpfn = vram_pages / 2;
			lpfn = ~0;
		} else {
			fpfn = 0;
			lpfn = vram_pages / 2;
		}
		for (i = 0; i < nvbo->placement.num_placement; ++i) {
			nvbo->placements[i].fpfn = fpfn;
			nvbo->placements[i].lpfn = lpfn;
		}
		for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
			nvbo->busy_placements[i].fpfn = fpfn;
			nvbo->busy_placements[i].lpfn = lpfn;
		}
	}
}

void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
{
	struct ttm_placement *pl = &nvbo->placement;
	uint32_t flags = (nvbo->force_coherent ? TTM_PL_FLAG_UNCACHED :
						 TTM_PL_MASK_CACHING) |
			 (nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);

	pl->placement = nvbo->placements;
	set_placement_list(nvbo->placements, &pl->num_placement,
			   type, flags);

	pl->busy_placement = nvbo->busy_placements;
	set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
			   type | busy, flags);

	set_placement_range(nvbo, type);
}

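/* Pin a buffer into @memtype. Pins are refcounted: only the first pin
 * (or a change of the contig requirement on Tesla and up) actually
 * validates the buffer into place, later callers just take another
 * reference.
 */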
int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype, bool contig)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	bool force = false, evict = false;
	int ret;

	ret = ttm_bo_reserve(bo, false, false, NULL);
	if (ret)
		return ret;

	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
	    memtype == TTM_PL_FLAG_VRAM && contig) {
		if (!nvbo->contig) {
			nvbo->contig = true;
			force = true;
			evict = true;
		}
	}

	if (nvbo->pin_refcnt) {
		if (!(memtype & (1 << bo->mem.mem_type)) || evict) {
			NV_ERROR(drm, "bo %p pinned elsewhere: "
				      "0x%08x vs 0x%08x\n", bo,
				 1 << bo->mem.mem_type, memtype);
			ret = -EBUSY;
		}
		nvbo->pin_refcnt++;
		goto out;
	}

	if (evict) {
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT, 0);
		ret = nouveau_bo_validate(nvbo, false, false);
		if (ret)
			goto out;
	}

	nvbo->pin_refcnt++;
	nouveau_bo_placement_set(nvbo, memtype, 0);

	/* drop pin_refcnt temporarily, so we don't trip the assertion
	 * in nouveau_bo_move() that makes sure we're not trying to
	 * move a pinned buffer
	 */
	nvbo->pin_refcnt--;
	ret = nouveau_bo_validate(nvbo, false, false);
	if (ret)
		goto out;
	nvbo->pin_refcnt++;

	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		drm->gem.vram_available -= bo->mem.size;
		break;
	case TTM_PL_TT:
		drm->gem.gart_available -= bo->mem.size;
		break;
	default:
		break;
	}

out:
	if (force && ret)
		nvbo->contig = false;
	ttm_bo_unreserve(bo);
	return ret;
}

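/* Release one pin reference; once the last one is dropped, the buffer
 * is revalidated as evictable and its size returned to the VRAM/GART
 * accounting.
 */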
int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret, ref;

	ret = ttm_bo_reserve(bo, false, false, NULL);
	if (ret)
		return ret;

	ref = --nvbo->pin_refcnt;
	WARN_ON_ONCE(ref < 0);
	if (ref)
		goto out;

	nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);

	ret = nouveau_bo_validate(nvbo, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			drm->gem.vram_available += bo->mem.size;
			break;
		case TTM_PL_TT:
			drm->gem.gart_available += bo->mem.size;
			break;
		default:
			break;
		}
	}

out:
	ttm_bo_unreserve(bo);
	return ret;
}

int
nouveau_bo_map(struct nouveau_bo *nvbo)
{
	int ret;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
	if (ret)
		return ret;

	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);

	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}

void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
	if (!nvbo)
		return;

	ttm_bo_kunmap(&nvbo->kmap);
}

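/* Coherency helpers: bounce the backing pages through the DMA API so
 * CPU writes become visible to the device (for_device) and device
 * writes to the CPU (for_cpu). Both are no-ops for coherent objects.
 */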
void
nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct nvkm_device *device = nvxx_device(&drm->client.device);
	struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
	int i;

	if (!ttm_dma)
		return;

	/* Don't waste time looping if the object is coherent */
	if (nvbo->force_coherent)
		return;

	for (i = 0; i < ttm_dma->ttm.num_pages; i++)
		dma_sync_single_for_device(device->dev, ttm_dma->dma_address[i],
					   PAGE_SIZE, DMA_TO_DEVICE);
}

void
nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct nvkm_device *device = nvxx_device(&drm->client.device);
	struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
	int i;

	if (!ttm_dma)
		return;

	/* Don't waste time looping if the object is coherent */
	if (nvbo->force_coherent)
		return;

	for (i = 0; i < ttm_dma->ttm.num_pages; i++)
		dma_sync_single_for_cpu(device->dev, ttm_dma->dma_address[i],
					PAGE_SIZE, DMA_FROM_DEVICE);
}

int
nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
		    bool no_wait_gpu)
{
	int ret;

	ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
			      interruptible, no_wait_gpu);
	if (ret)
		return ret;

	nouveau_bo_sync_for_device(nvbo);

	return 0;
}

void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

	mem += index;

	if (is_iomem)
		iowrite16_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

u32
nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

	mem += index;

	if (is_iomem)
		return ioread32_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

	mem += index;

	if (is_iomem)
		iowrite32_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

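/* TTM backend hooks: create AGP- or sgdma-backed translation tables,
 * and describe the capabilities of each memory type (system, VRAM,
 * GART) to TTM.
 */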
static struct ttm_tt *
nouveau_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
		      uint32_t page_flags, struct page *dummy_read)
{
#if IS_ENABLED(CONFIG_AGP)
	struct nouveau_drm *drm = nouveau_bdev(bdev);

	if (drm->agp.bridge) {
		return ttm_agp_tt_create(bdev, drm->agp.bridge, size,
					 page_flags, dummy_read);
	}
#endif

	return nouveau_sgdma_create_ttm(bdev, size, page_flags, dummy_read);
}

static int
nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	/* We'll do this from user space. */
	return 0;
}

static int
nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			 struct ttm_mem_type_manager *man)
{
	struct nouveau_drm *drm = nouveau_bdev(bdev);

	switch (type) {
	case TTM_PL_SYSTEM:
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED |
					 TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;

		if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
			/* Some BARs do not support being ioremapped WC */
			if (nvxx_bar(&drm->client.device)->iomap_uncached) {
				man->available_caching = TTM_PL_FLAG_UNCACHED;
				man->default_caching = TTM_PL_FLAG_UNCACHED;
			}

			man->func = &nouveau_vram_manager;
			man->io_reserve_fastpath = false;
			man->use_io_reserve_lru = true;
		} else {
			man->func = &ttm_bo_manager_func;
		}
		break;
	case TTM_PL_TT:
		if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA)
			man->func = &nouveau_gart_manager;
		else
		if (!drm->agp.bridge)
			man->func = &nv04_gart_manager;
		else
			man->func = &ttm_bo_manager_func;

		if (drm->agp.bridge) {
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
			man->available_caching = TTM_PL_FLAG_UNCACHED |
						 TTM_PL_FLAG_WC;
			man->default_caching = TTM_PL_FLAG_WC;
		} else {
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
				     TTM_MEMTYPE_FLAG_CMA;
			man->available_caching = TTM_PL_MASK_CACHING;
			man->default_caching = TTM_PL_FLAG_CACHED;
		}

		break;
	default:
		return -EINVAL;
	}
	return 0;
}

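/* On eviction, prefer spilling VRAM buffers into GART before falling
 * back to system memory.
 */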
static void
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
					 TTM_PL_FLAG_SYSTEM);
		break;
	default:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
		break;
	}

	*pl = nvbo->placement;
}

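/* Hardware buffer copy methods, one set per engine generation. Each
 * *_init function binds the copy class to a channel once at load time;
 * the matching *_copy/*_m2mf/*_exec function emits the commands for a
 * single move through that engine.
 */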
static int
nve0_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 2);
	if (ret == 0) {
		BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
		OUT_RING (chan, handle & 0x0000ffff);
		FIRE_RING (chan);
	}
	return ret;
}

static int
nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
{
	struct nouveau_mem *mem = nouveau_mem(old_reg);
	int ret = RING_SPACE(chan, 10);
	if (ret == 0) {
		BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8);
		OUT_RING (chan, upper_32_bits(mem->vma[0].addr));
		OUT_RING (chan, lower_32_bits(mem->vma[0].addr));
		OUT_RING (chan, upper_32_bits(mem->vma[1].addr));
		OUT_RING (chan, lower_32_bits(mem->vma[1].addr));
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, new_reg->num_pages);
		BEGIN_IMC0(chan, NvSubCopy, 0x0300, 0x0386);
	}
	return ret;
}

static int
nvc0_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 2);
	if (ret == 0) {
		BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
		OUT_RING (chan, handle);
	}
	return ret;
}

static int
nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
{
	struct nouveau_mem *mem = nouveau_mem(old_reg);
	u64 src_offset = mem->vma[0].addr;
	u64 dst_offset = mem->vma[1].addr;
	u32 page_count = new_reg->num_pages;
	int ret;

	page_count = new_reg->num_pages;
	while (page_count) {
		int line_count = (page_count > 8191) ? 8191 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_NVC0(chan, NvSubCopy, 0x030c, 8);
		OUT_RING (chan, upper_32_bits(src_offset));
		OUT_RING (chan, lower_32_bits(src_offset));
		OUT_RING (chan, upper_32_bits(dst_offset));
		OUT_RING (chan, lower_32_bits(dst_offset));
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, line_count);
		BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
		OUT_RING (chan, 0x00000110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

static int
nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
{
	struct nouveau_mem *mem = nouveau_mem(old_reg);
	u64 src_offset = mem->vma[0].addr;
	u64 dst_offset = mem->vma[1].addr;
	u32 page_count = new_reg->num_pages;
	int ret;

	page_count = new_reg->num_pages;
	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 12);
		if (ret)
			return ret;

		BEGIN_NVC0(chan, NvSubCopy, 0x0238, 2);
		OUT_RING (chan, upper_32_bits(dst_offset));
		OUT_RING (chan, lower_32_bits(dst_offset));
		BEGIN_NVC0(chan, NvSubCopy, 0x030c, 6);
		OUT_RING (chan, upper_32_bits(src_offset));
		OUT_RING (chan, lower_32_bits(src_offset));
		OUT_RING (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING (chan, PAGE_SIZE); /* line_length */
		OUT_RING (chan, line_count);
		BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
		OUT_RING (chan, 0x00100110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

static int
nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
{
	struct nouveau_mem *mem = nouveau_mem(old_reg);
	u64 src_offset = mem->vma[0].addr;
	u64 dst_offset = mem->vma[1].addr;
	u32 page_count = new_reg->num_pages;
	int ret;

	page_count = new_reg->num_pages;
	while (page_count) {
		int line_count = (page_count > 8191) ? 8191 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
		OUT_RING (chan, upper_32_bits(src_offset));
		OUT_RING (chan, lower_32_bits(src_offset));
		OUT_RING (chan, upper_32_bits(dst_offset));
		OUT_RING (chan, lower_32_bits(dst_offset));
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, line_count);
		BEGIN_NV04(chan, NvSubCopy, 0x0300, 1);
		OUT_RING (chan, 0x00000110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

static int
nv98_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
{
	struct nouveau_mem *mem = nouveau_mem(old_reg);
	int ret = RING_SPACE(chan, 7);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0320, 6);
		OUT_RING (chan, upper_32_bits(mem->vma[0].addr));
		OUT_RING (chan, lower_32_bits(mem->vma[0].addr));
		OUT_RING (chan, upper_32_bits(mem->vma[1].addr));
		OUT_RING (chan, lower_32_bits(mem->vma[1].addr));
		OUT_RING (chan, 0x00000000 /* COPY */);
		OUT_RING (chan, new_reg->num_pages << PAGE_SHIFT);
	}
	return ret;
}

static int
nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
{
	struct nouveau_mem *mem = nouveau_mem(old_reg);
	int ret = RING_SPACE(chan, 7);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0304, 6);
		OUT_RING (chan, new_reg->num_pages << PAGE_SHIFT);
		OUT_RING (chan, upper_32_bits(mem->vma[0].addr));
		OUT_RING (chan, lower_32_bits(mem->vma[0].addr));
		OUT_RING (chan, upper_32_bits(mem->vma[1].addr));
		OUT_RING (chan, lower_32_bits(mem->vma[1].addr));
		OUT_RING (chan, 0x00000000 /* MODE_COPY, QUERY_NONE */);
	}
	return ret;
}

static int
nv50_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 6);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
		OUT_RING (chan, handle);
		BEGIN_NV04(chan, NvSubCopy, 0x0180, 3);
		OUT_RING (chan, chan->drm->ntfy.handle);
		OUT_RING (chan, chan->vram.handle);
		OUT_RING (chan, chan->vram.handle);
	}

	return ret;
}

static int
nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
{
	struct nouveau_mem *mem = nouveau_mem(old_reg);
	u64 length = (new_reg->num_pages << PAGE_SHIFT);
	u64 src_offset = mem->vma[0].addr;
	u64 dst_offset = mem->vma[1].addr;
	int src_tiled = !!mem->kind;
	int dst_tiled = !!nouveau_mem(new_reg)->kind;
	int ret;

	while (length) {
		u32 amount, stride, height;

		ret = RING_SPACE(chan, 18 + 6 * (src_tiled + dst_tiled));
		if (ret)
			return ret;

		amount  = min(length, (u64)(4 * 1024 * 1024));
		stride  = 16 * 4;
		height  = amount / stride;

		if (src_tiled) {
			BEGIN_NV04(chan, NvSubCopy, 0x0200, 7);
			OUT_RING (chan, 0);
			OUT_RING (chan, 0);
			OUT_RING (chan, stride);
			OUT_RING (chan, height);
			OUT_RING (chan, 1);
			OUT_RING (chan, 0);
			OUT_RING (chan, 0);
		} else {
			BEGIN_NV04(chan, NvSubCopy, 0x0200, 1);
			OUT_RING (chan, 1);
		}
		if (dst_tiled) {
			BEGIN_NV04(chan, NvSubCopy, 0x021c, 7);
			OUT_RING (chan, 0);
			OUT_RING (chan, 0);
			OUT_RING (chan, stride);
			OUT_RING (chan, height);
			OUT_RING (chan, 1);
			OUT_RING (chan, 0);
			OUT_RING (chan, 0);
		} else {
			BEGIN_NV04(chan, NvSubCopy, 0x021c, 1);
			OUT_RING (chan, 1);
		}

		BEGIN_NV04(chan, NvSubCopy, 0x0238, 2);
		OUT_RING (chan, upper_32_bits(src_offset));
		OUT_RING (chan, upper_32_bits(dst_offset));
		BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
		OUT_RING (chan, lower_32_bits(src_offset));
		OUT_RING (chan, lower_32_bits(dst_offset));
		OUT_RING (chan, stride);
		OUT_RING (chan, stride);
		OUT_RING (chan, stride);
		OUT_RING (chan, height);
		OUT_RING (chan, 0x00000101);
		OUT_RING (chan, 0x00000000);
		BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING (chan, 0);

		length -= amount;
		src_offset += amount;
		dst_offset += amount;
	}

	return 0;
}

static int
nv04_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 4);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
		OUT_RING (chan, handle);
		BEGIN_NV04(chan, NvSubCopy, 0x0180, 1);
		OUT_RING (chan, chan->drm->ntfy.handle);
	}

	return ret;
}

static inline uint32_t
nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
		      struct nouveau_channel *chan, struct ttm_mem_reg *reg)
{
	if (reg->mem_type == TTM_PL_TT)
		return NvDmaTT;
	return chan->vram.handle;
}

static int
nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
{
	u32 src_offset = old_reg->start << PAGE_SHIFT;
	u32 dst_offset = new_reg->start << PAGE_SHIFT;
	u32 page_count = new_reg->num_pages;
	int ret;

	ret = RING_SPACE(chan, 3);
	if (ret)
		return ret;

	BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
	OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, old_reg));
	OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, new_reg));

	page_count = new_reg->num_pages;
	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_NV04(chan, NvSubCopy,
			   NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
		OUT_RING (chan, src_offset);
		OUT_RING (chan, dst_offset);
		OUT_RING (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING (chan, PAGE_SIZE); /* line_length */
		OUT_RING (chan, line_count);
		OUT_RING (chan, 0x00000101);
		OUT_RING (chan, 0x00000000);
		BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING (chan, 0);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

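/* Map the old and new backing store into temporary GPU virtual address
 * ranges (attached to the old node) so the copy engine can address both
 * sides of the transfer.
 */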
static int
nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo,
		     struct ttm_mem_reg *reg)
{
	struct nouveau_mem *old_mem = nouveau_mem(&bo->mem);
	struct nouveau_mem *new_mem = nouveau_mem(reg);
	struct nvkm_vm *vmm = drm->client.vm;
	u64 size = (u64)reg->num_pages << PAGE_SHIFT;
	int ret;

	ret = nvkm_vm_get(vmm, size, old_mem->mem.page, NV_MEM_ACCESS_RW,
			  &old_mem->vma[0]);
	if (ret)
		return ret;

	ret = nvkm_vm_get(vmm, size, new_mem->mem.page, NV_MEM_ACCESS_RW,
			  &old_mem->vma[1]);
	if (ret) {
		nvkm_vm_put(&old_mem->vma[0]);
		return ret;
	}

	ret = nouveau_mem_map(old_mem, vmm, &old_mem->vma[0]);
	if (ret)
		goto done;

	ret = nouveau_mem_map(new_mem, vmm, &old_mem->vma[1]);
done:
	if (ret) {
		nvkm_vm_put(&old_mem->vma[1]);
		nvkm_vm_put(&old_mem->vma[0]);
	}
	return ret;
}

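/* Move a buffer using the copy engine selected at load time, fencing
 * the copy so TTM only tears down the old backing store once the
 * transfer has completed.
 */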
static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
		     bool no_wait_gpu, struct ttm_mem_reg *new_reg)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_channel *chan = drm->ttm.chan;
	struct nouveau_cli *cli = (void *)chan->user.client;
	struct nouveau_fence *fence;
	int ret;

	/* create temporary vmas for the transfer and attach them to the
	 * old nvkm_mem node, these will get cleaned up after ttm has
	 * destroyed the ttm_mem_reg
	 */
	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
		ret = nouveau_bo_move_prep(drm, bo, new_reg);
		if (ret)
			return ret;
	}

	mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING);
	ret = nouveau_fence_sync(nouveau_bo(bo), chan, true, intr);
	if (ret == 0) {
		ret = drm->ttm.move(chan, bo, &bo->mem, new_reg);
		if (ret == 0) {
			ret = nouveau_fence_new(chan, false, &fence);
			if (ret == 0) {
				ret = ttm_bo_move_accel_cleanup(bo,
								&fence->base,
								evict,
								new_reg);
				nouveau_fence_unref(&fence);
			}
		}
	}
	mutex_unlock(&cli->mutex);
	return ret;
}

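/* Probe for the most capable copy method the device offers, falling
 * back to CPU copies when no suitable engine or channel is available.
 */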
void
nouveau_bo_move_init(struct nouveau_drm *drm)
{
	static const struct {
		const char *name;
		int engine;
		s32 oclass;
		int (*exec)(struct nouveau_channel *,
			    struct ttm_buffer_object *,
			    struct ttm_mem_reg *, struct ttm_mem_reg *);
		int (*init)(struct nouveau_channel *, u32 handle);
	} _methods[] = {
		{  "COPY", 4, 0xc1b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xc1b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 4, 0xc0b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xc0b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 4, 0xb0b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xb0b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 4, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{ "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init },
		{ "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 0, 0x85b5, nva3_bo_move_copy, nv50_bo_move_init },
		{ "CRYPT", 0, 0x74c1, nv84_bo_move_exec, nv50_bo_move_init },
		{  "M2MF", 0, 0x9039, nvc0_bo_move_m2mf, nvc0_bo_move_init },
		{  "M2MF", 0, 0x5039, nv50_bo_move_m2mf, nv50_bo_move_init },
		{  "M2MF", 0, 0x0039, nv04_bo_move_m2mf, nv04_bo_move_init },
		{}, /* terminator: the 0x88b4 entry below is never probed */
		{ "CRYPT", 0, 0x88b4, nv98_bo_move_exec, nv50_bo_move_init },
	}, *mthd = _methods;
	const char *name = "CPU";
	int ret;

	do {
		struct nouveau_channel *chan;

		if (mthd->engine)
			chan = drm->cechan;
		else
			chan = drm->channel;
		if (chan == NULL)
			continue;

		ret = nvif_object_init(&chan->user,
				       mthd->oclass | (mthd->engine << 16),
				       mthd->oclass, NULL, 0,
				       &drm->ttm.copy);
		if (ret == 0) {
			ret = mthd->init(chan, drm->ttm.copy.handle);
			if (ret) {
				nvif_object_fini(&drm->ttm.copy);
				continue;
			}

			drm->ttm.move = mthd->exec;
			drm->ttm.chan = chan;
			name = mthd->name;
			break;
		}
	} while ((++mthd)->exec);

	NV_INFO(drm, "MM: using %s for buffer copies\n", name);
}

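/* VRAM -> system move: blit into a GART-backed temporary with the
 * copy engine first, then let TTM flip the temporary into the final
 * system placement.
 */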
static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_gpu, struct ttm_mem_reg *new_reg)
{
	struct ttm_place placement_memtype = {
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING
	};
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_reg;
	int ret;

	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_reg = *new_reg;
	tmp_reg.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_reg, intr, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_tt_bind(bo->ttm, &tmp_reg);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, &tmp_reg);
	if (ret)
		goto out;

	ret = ttm_bo_move_ttm(bo, intr, no_wait_gpu, new_reg);
out:
	ttm_bo_mem_put(bo, &tmp_reg);
	return ret;
}

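/*
 * The reverse of nouveau_bo_move_flipd(): move system memory into a
 * temporary GART placement via TTM, then copy GART->VRAM with the
 * selected engine.  The temporary region is released in both the
 * success and failure paths.
 */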
static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_gpu, struct ttm_mem_reg *new_reg)
{
	struct ttm_place placement_memtype = {
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING
	};
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_reg;
	int ret;

	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_reg = *new_reg;
	tmp_reg.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_reg, intr, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_bo_move_ttm(bo, intr, no_wait_gpu, &tmp_reg);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, new_reg);
	if (ret)
		goto out;

out:
	ttm_bo_mem_put(bo, &tmp_reg);
	return ret;
}

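/*
 * Move notification: if the new placement is GPU-accessible and its page
 * size matches the bo's, remap every VMA onto the new backing storage;
 * otherwise wait for the bo to go idle and unmap them.
 */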
static void
nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, bool evict,
		     struct ttm_mem_reg *new_reg)
{
	struct nouveau_mem *mem = new_reg ? nouveau_mem(new_reg) : NULL;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_vma *vma;

	/* ttm can now (stupidly) pass the driver bos it didn't create... */
	if (bo->destroy != nouveau_bo_del_ttm)
		return;

	if (mem && new_reg->mem_type != TTM_PL_SYSTEM &&
	    mem->mem.page == nvbo->page) {
		list_for_each_entry(vma, &nvbo->vma_list, head) {
			nouveau_vma_map(vma, mem);
		}
	} else {
		list_for_each_entry(vma, &nvbo->vma_list, head) {
			WARN_ON(ttm_bo_wait(bo, false, false));
			nouveau_vma_unmap(vma);
		}
	}
}

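/*
 * On pre-Tesla chips VRAM tiling is programmed per memory region rather
 * than per page: whenever a bo lands in VRAM, allocate a tile region
 * matching its tiling mode and zeta setting.
 */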
static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_reg,
		   struct nouveau_drm_tile **new_tile)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	u64 offset = new_reg->start << PAGE_SHIFT;

	*new_tile = NULL;
	if (new_reg->mem_type != TTM_PL_VRAM)
		return 0;

	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
		*new_tile = nv10_bo_set_tiling(dev, offset, new_reg->size,
					       nvbo->mode, nvbo->zeta);
	}

	return 0;
}

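/*
 * Swap in the new tile region; the old one is handed back together with
 * the bo's exclusive fence, so it isn't reused before the GPU is done
 * with it.
 */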
static void
nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
		      struct nouveau_drm_tile *new_tile,
		      struct nouveau_drm_tile **old_tile)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct dma_fence *fence = reservation_object_get_excl(bo->resv);

	nv10_bo_put_tile_region(dev, *old_tile, fence);
	*old_tile = new_tile;
}

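/*
 * Main TTM move callback.  Tries, in order: a "fake" move (the bo has no
 * backing pages yet, so only the placement changes), an engine-assisted
 * copy via drm->ttm.move, and finally a CPU memcpy fallback.  On
 * pre-Tesla chips the tile region is rebound around the move.
 */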
static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
		bool no_wait_gpu, struct ttm_mem_reg *new_reg)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_mem_reg *old_reg = &bo->mem;
	struct nouveau_drm_tile *new_tile = NULL;
	int ret = 0;

	ret = ttm_bo_wait(bo, intr, no_wait_gpu);
	if (ret)
		return ret;

	if (nvbo->pin_refcnt)
		NV_WARN(drm, "Moving pinned object %p!\n", nvbo);

	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
		ret = nouveau_bo_vm_bind(bo, new_reg, &new_tile);
		if (ret)
			return ret;
	}

	/* Fake bo copy. */
	if (old_reg->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
		BUG_ON(bo->mem.mm_node != NULL);
		bo->mem = *new_reg;
		new_reg->mm_node = NULL;
		goto out;
	}

	/* Hardware assisted copy. */
	if (drm->ttm.move) {
		if (new_reg->mem_type == TTM_PL_SYSTEM)
			ret = nouveau_bo_move_flipd(bo, evict, intr,
						    no_wait_gpu, new_reg);
		else if (old_reg->mem_type == TTM_PL_SYSTEM)
			ret = nouveau_bo_move_flips(bo, evict, intr,
						    no_wait_gpu, new_reg);
		else
			ret = nouveau_bo_move_m2mf(bo, evict, intr,
						   no_wait_gpu, new_reg);
		if (!ret)
			goto out;
	}

	/* Fallback to software copy. */
	ret = ttm_bo_wait(bo, intr, no_wait_gpu);
	if (ret == 0)
		ret = ttm_bo_move_memcpy(bo, intr, no_wait_gpu, new_reg);

out:
	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
		if (ret)
			nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
		else
			nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
	}

	return ret;
}

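/*
 * mmap permission check: defer to the DRM vma offset node, which tracks
 * which clients are allowed to map this gem object.
 */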
static int
nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	return drm_vma_node_verify_access(&nvbo->gem.vma_node,
					  filp->private_data);
}

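/*
 * Fill in the bus placement TTM needs to CPU-map a bo: system memory
 * needs nothing, AGP apertures map linearly from the AGP base, and on
 * Tesla and later chips VRAM (and tiled GART memory) is reached through
 * a virtual mapping in BAR1.
 */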
static int
nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
{
	struct ttm_mem_type_manager *man = &bdev->man[reg->mem_type];
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct nvkm_device *device = nvxx_device(&drm->client.device);
	struct nouveau_mem *mem = nouveau_mem(reg);
	int ret;

	reg->bus.addr = NULL;
	reg->bus.offset = 0;
	reg->bus.size = reg->num_pages << PAGE_SHIFT;
	reg->bus.base = 0;
	reg->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (reg->mem_type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		return 0;
	case TTM_PL_TT:
#if IS_ENABLED(CONFIG_AGP)
		if (drm->agp.bridge) {
			reg->bus.offset = reg->start << PAGE_SHIFT;
			reg->bus.base = drm->agp.base;
			reg->bus.is_iomem = !drm->agp.cma;
		}
#endif
		if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA || !mem->kind)
			/* untiled */
			break;
		/* fallthrough, tiled memory */
	case TTM_PL_VRAM:
		reg->bus.offset = reg->start << PAGE_SHIFT;
		reg->bus.base = device->func->resource_addr(device, 1);
		reg->bus.is_iomem = true;
		if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
			struct nvkm_vmm *bar = nvkm_bar_bar1_vmm(device);
			int page_shift = 12;
			if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_FERMI)
				page_shift = mem->mem.page;

			ret = nvkm_vm_get(bar, mem->_mem->size << 12,
					  page_shift, NV_MEM_ACCESS_RW,
					  &mem->bar_vma);
			if (ret)
				return ret;

			nvkm_vm_map(&mem->bar_vma, mem->_mem);
			reg->bus.offset = mem->bar_vma.offset;
		}
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

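/* Tear down the BAR1 virtual mapping set up by io_mem_reserve(), if any. */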
static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
{
	struct nouveau_mem *mem = nouveau_mem(reg);

	if (!mem->bar_vma.node)
		return;

	nvkm_vm_unmap(&mem->bar_vma);
	nvkm_vm_put(&mem->bar_vma);
}

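/*
 * CPU fault handler: tiled buffers sitting in system memory on Tesla+
 * chips are pulled into GART, and on earlier chips buffers beyond the
 * BAR1-mappable window are migrated into mappable VRAM before the fault
 * is satisfied.
 */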
static int
nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nvkm_device *device = nvxx_device(&drm->client.device);
	u32 mappable = device->func->resource_size(device, 1) >> PAGE_SHIFT;
	int i, ret;

	/* as long as the bo isn't in vram, and isn't tiled, we've got
	 * nothing to do here.
	 */
	if (bo->mem.mem_type != TTM_PL_VRAM) {
		if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA ||
		    !nvbo->kind)
			return 0;

		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
			nouveau_bo_placement_set(nvbo, TTM_PL_TT, 0);

			ret = nouveau_bo_validate(nvbo, false, false);
			if (ret)
				return ret;
		}
		return 0;
	}

	/* make sure bo is in mappable vram */
	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA ||
	    bo->mem.start + bo->mem.num_pages < mappable)
		return 0;

	for (i = 0; i < nvbo->placement.num_placement; ++i) {
		nvbo->placements[i].fpfn = 0;
		nvbo->placements[i].lpfn = mappable;
	}

	for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
		nvbo->busy_placements[i].fpfn = 0;
		nvbo->busy_placements[i].lpfn = mappable;
	}

	nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
	return nouveau_bo_validate(nvbo, false, false);
}

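/*
 * Allocate and DMA-map the backing pages for a ttm.  AGP and (where
 * enabled) swiotlb configurations are handed off to the corresponding
 * TTM helpers; otherwise pages come from the common pool and are
 * streaming-mapped one at a time, unwinding on any mapping failure.
 */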
static int
nouveau_ttm_tt_populate(struct ttm_tt *ttm)
{
	struct ttm_dma_tt *ttm_dma = (void *)ttm;
	struct nouveau_drm *drm;
	struct nvkm_device *device;
	struct drm_device *dev;
	struct device *pdev;
	unsigned i;
	int r;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (ttm->state != tt_unpopulated)
		return 0;

	if (slave && ttm->sg) {
		/* make userspace faulting work */
		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
						 ttm_dma->dma_address, ttm->num_pages);
		ttm->state = tt_unbound;
		return 0;
	}

	drm = nouveau_bdev(ttm->bdev);
	device = nvxx_device(&drm->client.device);
	dev = drm->dev;
	pdev = device->dev;

#if IS_ENABLED(CONFIG_AGP)
	if (drm->agp.bridge) {
		return ttm_agp_tt_populate(ttm);
	}
#endif

#if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
	if (swiotlb_nr_tbl()) {
		return ttm_dma_populate((void *)ttm, dev->dev);
	}
#endif

	r = ttm_pool_populate(ttm);
	if (r) {
		return r;
	}

	for (i = 0; i < ttm->num_pages; i++) {
		dma_addr_t addr;

		addr = dma_map_page(pdev, ttm->pages[i], 0, PAGE_SIZE,
				    DMA_BIDIRECTIONAL);

		if (dma_mapping_error(pdev, addr)) {
			while (i--) {
				dma_unmap_page(pdev, ttm_dma->dma_address[i],
					       PAGE_SIZE, DMA_BIDIRECTIONAL);
				ttm_dma->dma_address[i] = 0;
			}
			ttm_pool_unpopulate(ttm);
			return -EFAULT;
		}

		ttm_dma->dma_address[i] = addr;
	}
	return 0;
}

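/* Inverse of nouveau_ttm_tt_populate(): unmap and release backing pages. */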
static void
nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	struct ttm_dma_tt *ttm_dma = (void *)ttm;
	struct nouveau_drm *drm;
	struct nvkm_device *device;
	struct drm_device *dev;
	struct device *pdev;
	unsigned i;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (slave)
		return;

	drm = nouveau_bdev(ttm->bdev);
	device = nvxx_device(&drm->client.device);
	dev = drm->dev;
	pdev = device->dev;

#if IS_ENABLED(CONFIG_AGP)
	if (drm->agp.bridge) {
		ttm_agp_tt_unpopulate(ttm);
		return;
	}
#endif

#if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
	if (swiotlb_nr_tbl()) {
		ttm_dma_unpopulate((void *)ttm, dev->dev);
		return;
	}
#endif

	for (i = 0; i < ttm->num_pages; i++) {
		if (ttm_dma->dma_address[i]) {
			dma_unmap_page(pdev, ttm_dma->dma_address[i], PAGE_SIZE,
				       DMA_BIDIRECTIONAL);
		}
	}

	ttm_pool_unpopulate(ttm);
}

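/*
 * Attach a fence to the bo's reservation object, exclusively (for
 * writes) or shared (for reads).
 */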
void
nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence, bool exclusive)
{
	struct reservation_object *resv = nvbo->bo.resv;

	if (exclusive)
		reservation_object_add_excl_fence(resv, &fence->base);
	else if (fence)
		reservation_object_add_shared_fence(resv, &fence->base);
}

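/*
 * The TTM driver vtable tying the above together: backing-page
 * management, placement and eviction policy, moves, CPU access checks
 * and io-space reservation.
 */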
struct ttm_bo_driver nouveau_bo_driver = {
	.ttm_tt_create = &nouveau_ttm_tt_create,
	.ttm_tt_populate = &nouveau_ttm_tt_populate,
	.ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
	.invalidate_caches = nouveau_bo_invalidate_caches,
	.init_mem_type = nouveau_bo_init_mem_type,
	.eviction_valuable = ttm_bo_eviction_valuable,
	.evict_flags = nouveau_bo_evict_flags,
	.move_notify = nouveau_bo_move_ntfy,
	.move = nouveau_bo_move,
	.verify_access = nouveau_bo_verify_access,
	.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
	.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
	.io_mem_free = &nouveau_ttm_io_mem_free,
	.io_mem_pfn = ttm_bo_default_io_mem_pfn,
};