/*
 * Copyright 2007 Dave Airlied
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *          Ben Skeggs   <darktama@iinet.net.au>
 *          Jeremy Kolb  <jkolb@brandeis.edu>
 */

#include <linux/dma-mapping.h>
#include <linux/swiotlb.h>

#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"

#include "nouveau_bo.h"
#include "nouveau_ttm.h"
#include "nouveau_gem.h"
#include "nouveau_mem.h"
#include "nouveau_vmm.h"

/*
 * NV10-NV40 tiling helpers
 */

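/* (Re)program one of the hardware tile regions: drop any fence guarding
 * the previous user, tear down the old PFB tile state, and write the new
 * addr/size/pitch configuration to the framebuffer unit.
 */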
static void
nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
                           u32 addr, u32 size, u32 pitch, u32 flags)
{
        struct nouveau_drm *drm = nouveau_drm(dev);
        int i = reg - drm->tile.reg;
        struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
        struct nvkm_fb_tile *tile = &fb->tile.region[i];

        nouveau_fence_unref(&reg->fence);

        if (tile->pitch)
                nvkm_fb_tile_fini(fb, i, tile);

        if (pitch)
                nvkm_fb_tile_init(fb, i, addr, size, pitch, flags, tile);

        nvkm_fb_tile_prog(fb, i, tile);
}

static struct nouveau_drm_tile *
nv10_bo_get_tile_region(struct drm_device *dev, int i)
{
        struct nouveau_drm *drm = nouveau_drm(dev);
        struct nouveau_drm_tile *tile = &drm->tile.reg[i];

        spin_lock(&drm->tile.lock);

        if (!tile->used &&
            (!tile->fence || nouveau_fence_done(tile->fence)))
                tile->used = true;
        else
                tile = NULL;

        spin_unlock(&drm->tile.lock);
        return tile;
}

static void
nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
                        struct dma_fence *fence)
{
        struct nouveau_drm *drm = nouveau_drm(dev);

        if (tile) {
                spin_lock(&drm->tile.lock);
                tile->fence = (struct nouveau_fence *)dma_fence_get(fence);
                tile->used = false;
                spin_unlock(&drm->tile.lock);
        }
}

static struct nouveau_drm_tile *
nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
                   u32 size, u32 pitch, u32 zeta)
{
        struct nouveau_drm *drm = nouveau_drm(dev);
        struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
        struct nouveau_drm_tile *tile, *found = NULL;
        int i;

        for (i = 0; i < fb->tile.regions; i++) {
                tile = nv10_bo_get_tile_region(dev, i);

                if (pitch && !found) {
                        found = tile;
                        continue;

                } else if (tile && fb->tile.region[i].pitch) {
                        /* Kill an unused tile region. */
                        nv10_bo_update_tile_region(dev, tile, 0, 0, 0, 0);
                }

                nv10_bo_put_tile_region(dev, tile, NULL);
        }

        if (found)
                nv10_bo_update_tile_region(dev, found, addr, size, pitch, zeta);
        return found;
}

static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
        struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
        struct drm_device *dev = drm->dev;
        struct nouveau_bo *nvbo = nouveau_bo(bo);

        if (unlikely(nvbo->gem.filp))
                DRM_ERROR("bo %p still attached to GEM object\n", bo);
        WARN_ON(nvbo->pin_refcnt > 0);
        nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
        kfree(nvbo);
}

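/* Round x up to the next multiple of y; do_div() is used instead of the
 * '/' operator so the 64-bit division also works on 32-bit platforms.
 */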
static inline u64
roundup_64(u64 x, u32 y)
{
        x += y - 1;
        do_div(x, y);
        return x * y;
}

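/* Pre-Tesla GPUs access tiled buffers through fixed-function tile regions
 * with chipset-specific granularity, so tiled BOs need per-chipset size
 * and alignment fixups; from Tesla on, the page size chosen for the BO
 * decides both values.
 */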
static void
nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
                       int *align, u64 *size)
{
        struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
        struct nvif_device *device = &drm->client.device;

        if (device->info.family < NV_DEVICE_INFO_V0_TESLA) {
                if (nvbo->mode) {
                        if (device->info.chipset >= 0x40) {
                                *align = 65536;
                                *size = roundup_64(*size, 64 * nvbo->mode);

                        } else if (device->info.chipset >= 0x30) {
                                *align = 32768;
                                *size = roundup_64(*size, 64 * nvbo->mode);

                        } else if (device->info.chipset >= 0x20) {
                                *align = 16384;
                                *size = roundup_64(*size, 64 * nvbo->mode);

                        } else if (device->info.chipset >= 0x10) {
                                *align = 16384;
                                *size = roundup_64(*size, 32 * nvbo->mode);
                        }
                }
        } else {
                *size = roundup_64(*size, (1 << nvbo->page));
                *align = max((1 << nvbo->page), *align);
        }

        *size = roundup_64(*size, PAGE_SIZE);
}

int
nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
               uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
               struct sg_table *sg, struct reservation_object *robj,
               struct nouveau_bo **pnvbo)
{
        struct nouveau_drm *drm = cli->drm;
        struct nouveau_bo *nvbo;
        struct nvif_mmu *mmu = &cli->mmu;
        size_t acc_size;
        int ret;
        int type = ttm_bo_type_device;

        if (!size) {
                NV_WARN(drm, "skipped size %016llx\n", size);
                return -EINVAL;
        }

        if (sg)
                type = ttm_bo_type_sg;

        nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
        if (!nvbo)
                return -ENOMEM;
        INIT_LIST_HEAD(&nvbo->head);
        INIT_LIST_HEAD(&nvbo->entry);
        INIT_LIST_HEAD(&nvbo->vma_list);
        nvbo->bo.bdev = &drm->ttm.bdev;
        nvbo->cli = cli;

        /* This is confusing, and doesn't actually mean we want an uncached
         * mapping, but is what NOUVEAU_GEM_DOMAIN_COHERENT gets translated
         * into in nouveau_gem_new().
         */
        if (flags & TTM_PL_FLAG_UNCACHED) {
                /* Determine if we can get a cache-coherent map, forcing
                 * uncached mapping if we can't.
                 */
                if (mmu->type[drm->ttm.type_host].type & NVIF_MEM_UNCACHED)
                        nvbo->force_coherent = true;
        }

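        /* Decode the backing-store "kind" from the UAPI tile flags; the
         * bitfield layout differs per GPU family, as does whether compression
         * is requested explicitly (Tesla) or implied by the kind (Fermi+).
         */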
        if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI) {
                nvbo->kind = (tile_flags & 0x0000ff00) >> 8;
                if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
                        kfree(nvbo);
                        return -EINVAL;
                }

                nvbo->comp = mmu->kind[nvbo->kind] != nvbo->kind;
        } else
        if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
                nvbo->kind = (tile_flags & 0x00007f00) >> 8;
                nvbo->comp = (tile_flags & 0x00030000) >> 16;
                if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
                        kfree(nvbo);
                        return -EINVAL;
                }
        } else {
                nvbo->zeta = (tile_flags & 0x00000007);
        }
        nvbo->mode = tile_mode;
        nvbo->contig = !(tile_flags & NOUVEAU_GEM_TILE_NONCONTIG);

        nvbo->page = 12;
        if (drm->client.vm) {
                if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
                        nvbo->page = drm->client.vm->mmu->lpg_shift;
                else {
                        if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI)
                                nvbo->kind = mmu->kind[nvbo->kind];
                        nvbo->comp = 0;
                }
        }

        nouveau_bo_fixup_align(nvbo, flags, &align, &size);
        nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
        nouveau_bo_placement_set(nvbo, flags, 0);

        acc_size = ttm_bo_dma_acc_size(&drm->ttm.bdev, size,
                                       sizeof(struct nouveau_bo));

        ret = ttm_bo_init(&drm->ttm.bdev, &nvbo->bo, size,
                          type, &nvbo->placement,
                          align >> PAGE_SHIFT, false, NULL, acc_size, sg,
                          robj, nouveau_bo_del_ttm);
        if (ret) {
                /* ttm will call nouveau_bo_del_ttm if it fails.. */
                return ret;
        }

        *pnvbo = nvbo;
        return 0;
}

static void
set_placement_list(struct ttm_place *pl, unsigned *n, uint32_t type, uint32_t flags)
{
        *n = 0;

        if (type & TTM_PL_FLAG_VRAM)
                pl[(*n)++].flags = TTM_PL_FLAG_VRAM | flags;
        if (type & TTM_PL_FLAG_TT)
                pl[(*n)++].flags = TTM_PL_FLAG_TT | flags;
        if (type & TTM_PL_FLAG_SYSTEM)
                pl[(*n)++].flags = TTM_PL_FLAG_SYSTEM | flags;
}

static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
{
        struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
        u32 vram_pages = drm->client.device.info.ram_size >> PAGE_SHIFT;
        unsigned i, fpfn, lpfn;

        if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CELSIUS &&
            nvbo->mode && (type & TTM_PL_FLAG_VRAM) &&
            nvbo->bo.mem.num_pages < vram_pages / 4) {
                /*
                 * Make sure that the color and depth buffers are handled
                 * by independent memory controller units. Up to a 9x
                 * speed up when alpha-blending and depth-test are enabled
                 * at the same time.
                 */
                if (nvbo->zeta) {
                        fpfn = vram_pages / 2;
                        lpfn = ~0;
                } else {
                        fpfn = 0;
                        lpfn = vram_pages / 2;
                }
                for (i = 0; i < nvbo->placement.num_placement; ++i) {
                        nvbo->placements[i].fpfn = fpfn;
                        nvbo->placements[i].lpfn = lpfn;
                }
                for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
                        nvbo->busy_placements[i].fpfn = fpfn;
                        nvbo->busy_placements[i].lpfn = lpfn;
                }
        }
}

void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
{
        struct ttm_placement *pl = &nvbo->placement;
        uint32_t flags = (nvbo->force_coherent ? TTM_PL_FLAG_UNCACHED :
                                                 TTM_PL_MASK_CACHING) |
                         (nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);

        pl->placement = nvbo->placements;
        set_placement_list(nvbo->placements, &pl->num_placement,
                           type, flags);

        pl->busy_placement = nvbo->busy_placements;
        set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
                           type | busy, flags);

        set_placement_range(nvbo, type);
}

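/* Pin a buffer object into the given memory type.  Pinning is reference
 * counted, and a BO already pinned into a different memory type is refused
 * with -EBUSY.  On Tesla and newer, pinning into VRAM with 'contig' set may
 * first evict the BO to GART so it can be re-validated contiguously.
 */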
int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype, bool contig)
{
        struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
        struct ttm_buffer_object *bo = &nvbo->bo;
        bool force = false, evict = false;
        int ret;

        ret = ttm_bo_reserve(bo, false, false, NULL);
        if (ret)
                return ret;

        if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
            memtype == TTM_PL_FLAG_VRAM && contig) {
                if (!nvbo->contig) {
                        nvbo->contig = true;
                        force = true;
                        evict = true;
                }
        }

        if (nvbo->pin_refcnt) {
                if (!(memtype & (1 << bo->mem.mem_type)) || evict) {
                        NV_ERROR(drm, "bo %p pinned elsewhere: "
                                      "0x%08x vs 0x%08x\n", bo,
                                 1 << bo->mem.mem_type, memtype);
                        ret = -EBUSY;
                }
                nvbo->pin_refcnt++;
                goto out;
        }

        if (evict) {
                nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT, 0);
                ret = nouveau_bo_validate(nvbo, false, false);
                if (ret)
                        goto out;
        }

        nvbo->pin_refcnt++;
        nouveau_bo_placement_set(nvbo, memtype, 0);

        /* drop pin_refcnt temporarily, so we don't trip the assertion
         * in nouveau_bo_move() that makes sure we're not trying to
         * move a pinned buffer
         */
        nvbo->pin_refcnt--;
        ret = nouveau_bo_validate(nvbo, false, false);
        if (ret)
                goto out;
        nvbo->pin_refcnt++;

        switch (bo->mem.mem_type) {
        case TTM_PL_VRAM:
                drm->gem.vram_available -= bo->mem.size;
                break;
        case TTM_PL_TT:
                drm->gem.gart_available -= bo->mem.size;
                break;
        default:
                break;
        }

out:
        if (force && ret)
                nvbo->contig = false;
        ttm_bo_unreserve(bo);
        return ret;
}

int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
        struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
        struct ttm_buffer_object *bo = &nvbo->bo;
        int ret, ref;

        ret = ttm_bo_reserve(bo, false, false, NULL);
        if (ret)
                return ret;

        ref = --nvbo->pin_refcnt;
        WARN_ON_ONCE(ref < 0);
        if (ref)
                goto out;

        nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);

        ret = nouveau_bo_validate(nvbo, false, false);
        if (ret == 0) {
                switch (bo->mem.mem_type) {
                case TTM_PL_VRAM:
                        drm->gem.vram_available += bo->mem.size;
                        break;
                case TTM_PL_TT:
                        drm->gem.gart_available += bo->mem.size;
                        break;
                default:
                        break;
                }
        }

out:
        ttm_bo_unreserve(bo);
        return ret;
}

int
nouveau_bo_map(struct nouveau_bo *nvbo)
{
        int ret;

        ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
        if (ret)
                return ret;

        ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);

        ttm_bo_unreserve(&nvbo->bo);
        return ret;
}

void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
        if (!nvbo)
                return;

        ttm_bo_kunmap(&nvbo->kmap);
}

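/* For non-coherent mappings, flush CPU-side writes out to the device by
 * syncing every backing page; coherent BOs can skip the loop entirely.
 */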
void
nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
{
        struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
        struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
        int i;

        if (!ttm_dma)
                return;

        /* Don't waste time looping if the object is coherent */
        if (nvbo->force_coherent)
                return;

        for (i = 0; i < ttm_dma->ttm.num_pages; i++)
                dma_sync_single_for_device(drm->dev->dev,
                                           ttm_dma->dma_address[i],
                                           PAGE_SIZE, DMA_TO_DEVICE);
}

void
nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
{
        struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
        struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
        int i;

        if (!ttm_dma)
                return;

        /* Don't waste time looping if the object is coherent */
        if (nvbo->force_coherent)
                return;

        for (i = 0; i < ttm_dma->ttm.num_pages; i++)
                dma_sync_single_for_cpu(drm->dev->dev, ttm_dma->dma_address[i],
                                        PAGE_SIZE, DMA_FROM_DEVICE);
}

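/* ttm_bo_validate() wrapper that also performs the CPU->device DMA sync
 * required for non-coherent buffers after (re)placement.
 */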
int
nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
                    bool no_wait_gpu)
{
        int ret;

        ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
                              interruptible, no_wait_gpu);
        if (ret)
                return ret;

        nouveau_bo_sync_for_device(nvbo);

        return 0;
}

void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
        bool is_iomem;
        u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

        mem += index;

        if (is_iomem)
                iowrite16_native(val, (void __force __iomem *)mem);
        else
                *mem = val;
}

u32
nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
{
        bool is_iomem;
        u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

        mem += index;

        if (is_iomem)
                return ioread32_native((void __force __iomem *)mem);
        else
                return *mem;
}

void
nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
{
        bool is_iomem;
        u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

        mem += index;

        if (is_iomem)
                iowrite32_native(val, (void __force __iomem *)mem);
        else
                *mem = val;
}

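/* Back TTM objects with AGP when an AGP bridge drives the GPU, and with
 * the scatter/gather DMA implementation otherwise.
 */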
static struct ttm_tt *
nouveau_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
                      uint32_t page_flags, struct page *dummy_read)
{
#if IS_ENABLED(CONFIG_AGP)
        struct nouveau_drm *drm = nouveau_bdev(bdev);

        if (drm->agp.bridge) {
                return ttm_agp_tt_create(bdev, drm->agp.bridge, size,
                                         page_flags, dummy_read);
        }
#endif

        return nouveau_sgdma_create_ttm(bdev, size, page_flags, dummy_read);
}

static int
nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
        /* We'll do this from user space. */
        return 0;
}

static int
nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                         struct ttm_mem_type_manager *man)
{
        struct nouveau_drm *drm = nouveau_bdev(bdev);
        struct nvif_mmu *mmu = &drm->client.mmu;

        switch (type) {
        case TTM_PL_SYSTEM:
                man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_MASK_CACHING;
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        case TTM_PL_VRAM:
                man->flags = TTM_MEMTYPE_FLAG_FIXED |
                             TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_FLAG_UNCACHED |
                                         TTM_PL_FLAG_WC;
                man->default_caching = TTM_PL_FLAG_WC;

                if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
                        /* Some BARs do not support being ioremapped WC */
                        const u8 type = mmu->type[drm->ttm.type_vram].type;
                        if (type & NVIF_MEM_UNCACHED) {
                                man->available_caching = TTM_PL_FLAG_UNCACHED;
                                man->default_caching = TTM_PL_FLAG_UNCACHED;
                        }

                        man->func = &nouveau_vram_manager;
                        man->io_reserve_fastpath = false;
                        man->use_io_reserve_lru = true;
                } else {
                        man->func = &ttm_bo_manager_func;
                }
                break;
        case TTM_PL_TT:
                if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA)
                        man->func = &nouveau_gart_manager;
                else
                if (!drm->agp.bridge)
                        man->func = &nv04_gart_manager;
                else
                        man->func = &ttm_bo_manager_func;

                if (drm->agp.bridge) {
                        man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
                        man->available_caching = TTM_PL_FLAG_UNCACHED |
                                TTM_PL_FLAG_WC;
                        man->default_caching = TTM_PL_FLAG_WC;
                } else {
                        man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
                                     TTM_MEMTYPE_FLAG_CMA;
                        man->available_caching = TTM_PL_MASK_CACHING;
                        man->default_caching = TTM_PL_FLAG_CACHED;
                }

                break;
        default:
                return -EINVAL;
        }
        return 0;
}

static void
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
        struct nouveau_bo *nvbo = nouveau_bo(bo);

        switch (bo->mem.mem_type) {
        case TTM_PL_VRAM:
                nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
                                         TTM_PL_FLAG_SYSTEM);
                break;
        default:
                nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
                break;
        }

        *pl = nvbo->placement;
}


static int
nve0_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
        int ret = RING_SPACE(chan, 2);
        if (ret == 0) {
                BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
                OUT_RING  (chan, handle & 0x0000ffff);
                FIRE_RING (chan);
        }
        return ret;
}

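/* The per-class copy routines below all perform the same logical operation,
 * copying old_reg to new_reg, using whichever copy/M2MF engine class the
 * device exposes; only the method layout differs.  Apart from the NV04 path
 * further down, they address the buffers through the temporary VMAs set up
 * by nouveau_bo_move_prep().
 */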
static int
nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
{
        struct nouveau_mem *mem = nouveau_mem(old_reg);
        int ret = RING_SPACE(chan, 10);
        if (ret == 0) {
                BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8);
                OUT_RING  (chan, upper_32_bits(mem->vma[0].addr));
                OUT_RING  (chan, lower_32_bits(mem->vma[0].addr));
                OUT_RING  (chan, upper_32_bits(mem->vma[1].addr));
                OUT_RING  (chan, lower_32_bits(mem->vma[1].addr));
                OUT_RING  (chan, PAGE_SIZE);
                OUT_RING  (chan, PAGE_SIZE);
                OUT_RING  (chan, PAGE_SIZE);
                OUT_RING  (chan, new_reg->num_pages);
                BEGIN_IMC0(chan, NvSubCopy, 0x0300, 0x0386);
        }
        return ret;
}

static int
nvc0_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
        int ret = RING_SPACE(chan, 2);
        if (ret == 0) {
                BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
                OUT_RING  (chan, handle);
        }
        return ret;
}

static int
nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
{
        struct nouveau_mem *mem = nouveau_mem(old_reg);
        u64 src_offset = mem->vma[0].addr;
        u64 dst_offset = mem->vma[1].addr;
        u32 page_count = new_reg->num_pages;
        int ret;

        page_count = new_reg->num_pages;
        while (page_count) {
                int line_count = (page_count > 8191) ? 8191 : page_count;

                ret = RING_SPACE(chan, 11);
                if (ret)
                        return ret;

                BEGIN_NVC0(chan, NvSubCopy, 0x030c, 8);
                OUT_RING  (chan, upper_32_bits(src_offset));
                OUT_RING  (chan, lower_32_bits(src_offset));
                OUT_RING  (chan, upper_32_bits(dst_offset));
                OUT_RING  (chan, lower_32_bits(dst_offset));
                OUT_RING  (chan, PAGE_SIZE);
                OUT_RING  (chan, PAGE_SIZE);
                OUT_RING  (chan, PAGE_SIZE);
                OUT_RING  (chan, line_count);
                BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
                OUT_RING  (chan, 0x00000110);

                page_count -= line_count;
                src_offset += (PAGE_SIZE * line_count);
                dst_offset += (PAGE_SIZE * line_count);
        }

        return 0;
}

static int
nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
{
        struct nouveau_mem *mem = nouveau_mem(old_reg);
        u64 src_offset = mem->vma[0].addr;
        u64 dst_offset = mem->vma[1].addr;
        u32 page_count = new_reg->num_pages;
        int ret;

        page_count = new_reg->num_pages;
        while (page_count) {
                int line_count = (page_count > 2047) ? 2047 : page_count;

                ret = RING_SPACE(chan, 12);
                if (ret)
                        return ret;

                BEGIN_NVC0(chan, NvSubCopy, 0x0238, 2);
                OUT_RING  (chan, upper_32_bits(dst_offset));
                OUT_RING  (chan, lower_32_bits(dst_offset));
                BEGIN_NVC0(chan, NvSubCopy, 0x030c, 6);
                OUT_RING  (chan, upper_32_bits(src_offset));
                OUT_RING  (chan, lower_32_bits(src_offset));
                OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
                OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
                OUT_RING  (chan, PAGE_SIZE); /* line_length */
                OUT_RING  (chan, line_count);
                BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
                OUT_RING  (chan, 0x00100110);

                page_count -= line_count;
                src_offset += (PAGE_SIZE * line_count);
                dst_offset += (PAGE_SIZE * line_count);
        }

        return 0;
}

static int
nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
{
        struct nouveau_mem *mem = nouveau_mem(old_reg);
        u64 src_offset = mem->vma[0].addr;
        u64 dst_offset = mem->vma[1].addr;
        u32 page_count = new_reg->num_pages;
        int ret;

        page_count = new_reg->num_pages;
        while (page_count) {
                int line_count = (page_count > 8191) ? 8191 : page_count;

                ret = RING_SPACE(chan, 11);
                if (ret)
                        return ret;

                BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
                OUT_RING  (chan, upper_32_bits(src_offset));
                OUT_RING  (chan, lower_32_bits(src_offset));
                OUT_RING  (chan, upper_32_bits(dst_offset));
                OUT_RING  (chan, lower_32_bits(dst_offset));
                OUT_RING  (chan, PAGE_SIZE);
                OUT_RING  (chan, PAGE_SIZE);
                OUT_RING  (chan, PAGE_SIZE);
                OUT_RING  (chan, line_count);
                BEGIN_NV04(chan, NvSubCopy, 0x0300, 1);
                OUT_RING  (chan, 0x00000110);

                page_count -= line_count;
                src_offset += (PAGE_SIZE * line_count);
                dst_offset += (PAGE_SIZE * line_count);
        }

        return 0;
}

static int
nv98_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
{
        struct nouveau_mem *mem = nouveau_mem(old_reg);
        int ret = RING_SPACE(chan, 7);
        if (ret == 0) {
                BEGIN_NV04(chan, NvSubCopy, 0x0320, 6);
                OUT_RING  (chan, upper_32_bits(mem->vma[0].addr));
                OUT_RING  (chan, lower_32_bits(mem->vma[0].addr));
                OUT_RING  (chan, upper_32_bits(mem->vma[1].addr));
                OUT_RING  (chan, lower_32_bits(mem->vma[1].addr));
                OUT_RING  (chan, 0x00000000 /* COPY */);
                OUT_RING  (chan, new_reg->num_pages << PAGE_SHIFT);
        }
        return ret;
}

static int
nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
{
        struct nouveau_mem *mem = nouveau_mem(old_reg);
        int ret = RING_SPACE(chan, 7);
        if (ret == 0) {
                BEGIN_NV04(chan, NvSubCopy, 0x0304, 6);
                OUT_RING  (chan, new_reg->num_pages << PAGE_SHIFT);
                OUT_RING  (chan, upper_32_bits(mem->vma[0].addr));
                OUT_RING  (chan, lower_32_bits(mem->vma[0].addr));
                OUT_RING  (chan, upper_32_bits(mem->vma[1].addr));
                OUT_RING  (chan, lower_32_bits(mem->vma[1].addr));
                OUT_RING  (chan, 0x00000000 /* MODE_COPY, QUERY_NONE */);
        }
        return ret;
}

static int
nv50_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
        int ret = RING_SPACE(chan, 6);
        if (ret == 0) {
                BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
                OUT_RING  (chan, handle);
                BEGIN_NV04(chan, NvSubCopy, 0x0180, 3);
                OUT_RING  (chan, chan->drm->ntfy.handle);
                OUT_RING  (chan, chan->vram.handle);
                OUT_RING  (chan, chan->vram.handle);
        }

        return ret;
}

static int
nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
{
        struct nouveau_mem *mem = nouveau_mem(old_reg);
        u64 length = (new_reg->num_pages << PAGE_SHIFT);
        u64 src_offset = mem->vma[0].addr;
        u64 dst_offset = mem->vma[1].addr;
        int src_tiled = !!mem->kind;
        int dst_tiled = !!nouveau_mem(new_reg)->kind;
        int ret;

        while (length) {
                u32 amount, stride, height;

                ret = RING_SPACE(chan, 18 + 6 * (src_tiled + dst_tiled));
                if (ret)
                        return ret;

                amount = min(length, (u64)(4 * 1024 * 1024));
                stride = 16 * 4;
                height = amount / stride;

                if (src_tiled) {
                        BEGIN_NV04(chan, NvSubCopy, 0x0200, 7);
                        OUT_RING  (chan, 0);
                        OUT_RING  (chan, 0);
                        OUT_RING  (chan, stride);
                        OUT_RING  (chan, height);
                        OUT_RING  (chan, 1);
                        OUT_RING  (chan, 0);
                        OUT_RING  (chan, 0);
                } else {
                        BEGIN_NV04(chan, NvSubCopy, 0x0200, 1);
                        OUT_RING  (chan, 1);
                }
                if (dst_tiled) {
                        BEGIN_NV04(chan, NvSubCopy, 0x021c, 7);
                        OUT_RING  (chan, 0);
                        OUT_RING  (chan, 0);
                        OUT_RING  (chan, stride);
                        OUT_RING  (chan, height);
                        OUT_RING  (chan, 1);
                        OUT_RING  (chan, 0);
                        OUT_RING  (chan, 0);
                } else {
                        BEGIN_NV04(chan, NvSubCopy, 0x021c, 1);
                        OUT_RING  (chan, 1);
                }

                BEGIN_NV04(chan, NvSubCopy, 0x0238, 2);
                OUT_RING  (chan, upper_32_bits(src_offset));
                OUT_RING  (chan, upper_32_bits(dst_offset));
                BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
                OUT_RING  (chan, lower_32_bits(src_offset));
                OUT_RING  (chan, lower_32_bits(dst_offset));
                OUT_RING  (chan, stride);
                OUT_RING  (chan, stride);
                OUT_RING  (chan, stride);
                OUT_RING  (chan, height);
                OUT_RING  (chan, 0x00000101);
                OUT_RING  (chan, 0x00000000);
                BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
                OUT_RING  (chan, 0);

                length -= amount;
                src_offset += amount;
                dst_offset += amount;
        }

        return 0;
}

static int
nv04_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
        int ret = RING_SPACE(chan, 4);
        if (ret == 0) {
                BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
                OUT_RING  (chan, handle);
                BEGIN_NV04(chan, NvSubCopy, 0x0180, 1);
                OUT_RING  (chan, chan->drm->ntfy.handle);
        }

        return ret;
}

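/* Select the DMA context object through which the NV04 M2MF engine should
 * address a memory region: the GART object for TT placements, the channel's
 * VRAM object for everything else.
 */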
static inline uint32_t
nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
                      struct nouveau_channel *chan, struct ttm_mem_reg *reg)
{
        if (reg->mem_type == TTM_PL_TT)
                return NvDmaTT;
        return chan->vram.handle;
}

static int
nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
{
        u32 src_offset = old_reg->start << PAGE_SHIFT;
        u32 dst_offset = new_reg->start << PAGE_SHIFT;
        u32 page_count = new_reg->num_pages;
        int ret;

        ret = RING_SPACE(chan, 3);
        if (ret)
                return ret;

        BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
        OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, old_reg));
        OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, new_reg));

        page_count = new_reg->num_pages;
        while (page_count) {
                int line_count = (page_count > 2047) ? 2047 : page_count;

                ret = RING_SPACE(chan, 11);
                if (ret)
                        return ret;

                BEGIN_NV04(chan, NvSubCopy,
                           NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
                OUT_RING  (chan, src_offset);
                OUT_RING  (chan, dst_offset);
                OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
                OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
                OUT_RING  (chan, PAGE_SIZE); /* line_length */
                OUT_RING  (chan, line_count);
                OUT_RING  (chan, 0x00000101);
                OUT_RING  (chan, 0x00000000);
                BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
                OUT_RING  (chan, 0);

                page_count -= line_count;
                src_offset += (PAGE_SIZE * line_count);
                dst_offset += (PAGE_SIZE * line_count);
        }

        return 0;
}

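/* Reserve and map temporary GPU virtual address ranges covering both ends
 * of an upcoming copy.  The VMAs are parked in the *old* node's vma[] array
 * so they survive until TTM destroys it once the copy has completed (see
 * the comment in nouveau_bo_move_m2mf()).
 */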
static int
nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo,
                     struct ttm_mem_reg *reg)
{
        struct nouveau_mem *old_mem = nouveau_mem(&bo->mem);
        struct nouveau_mem *new_mem = nouveau_mem(reg);
        struct nvkm_vm *vmm = drm->client.vm;
        u64 size = (u64)reg->num_pages << PAGE_SHIFT;
        int ret;

        ret = nvkm_vm_get(vmm, size, old_mem->mem.page, NV_MEM_ACCESS_RW,
                          &old_mem->vma[0]);
        if (ret)
                return ret;

        ret = nvkm_vm_get(vmm, size, new_mem->mem.page, NV_MEM_ACCESS_RW,
                          &old_mem->vma[1]);
        if (ret) {
                nvkm_vm_put(&old_mem->vma[0]);
                return ret;
        }

        ret = nouveau_mem_map(old_mem, vmm, &old_mem->vma[0]);
        if (ret)
                goto done;

        ret = nouveau_mem_map(new_mem, vmm, &old_mem->vma[1]);
done:
        if (ret) {
                nvkm_vm_put(&old_mem->vma[1]);
                nvkm_vm_put(&old_mem->vma[0]);
        }
        return ret;
}

static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
                     bool no_wait_gpu, struct ttm_mem_reg *new_reg)
{
        struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
        struct nouveau_channel *chan = drm->ttm.chan;
        struct nouveau_cli *cli = (void *)chan->user.client;
        struct nouveau_fence *fence;
        int ret;

        /* create temporary vmas for the transfer and attach them to the
         * old nvkm_mem node, these will get cleaned up after ttm has
         * destroyed the ttm_mem_reg
         */
        if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
                ret = nouveau_bo_move_prep(drm, bo, new_reg);
                if (ret)
                        return ret;
        }

        mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING);
        ret = nouveau_fence_sync(nouveau_bo(bo), chan, true, intr);
        if (ret == 0) {
                ret = drm->ttm.move(chan, bo, &bo->mem, new_reg);
                if (ret == 0) {
                        ret = nouveau_fence_new(chan, false, &fence);
                        if (ret == 0) {
                                ret = ttm_bo_move_accel_cleanup(bo,
                                                                &fence->base,
                                                                evict,
                                                                new_reg);
                                nouveau_fence_unref(&fence);
                        }
                }
        }
        mutex_unlock(&cli->mutex);
        return ret;
}

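/* Probe for the best available hardware copy mechanism.  The table is
 * ordered newest class first; entries with a non-zero 'engine' run on the
 * dedicated copy-engine channel (drm->cechan), the rest on the general
 * channel.  The empty entry terminates the scan, leaving the 0x88b4 CRYPT
 * entry below it unreachable.
 */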
void
nouveau_bo_move_init(struct nouveau_drm *drm)
{
        static const struct {
                const char *name;
                int engine;
                s32 oclass;
                int (*exec)(struct nouveau_channel *,
                            struct ttm_buffer_object *,
                            struct ttm_mem_reg *, struct ttm_mem_reg *);
                int (*init)(struct nouveau_channel *, u32 handle);
        } _methods[] = {
                {  "COPY", 4, 0xc1b5, nve0_bo_move_copy, nve0_bo_move_init },
                {  "GRCE", 0, 0xc1b5, nve0_bo_move_copy, nvc0_bo_move_init },
                {  "COPY", 4, 0xc0b5, nve0_bo_move_copy, nve0_bo_move_init },
                {  "GRCE", 0, 0xc0b5, nve0_bo_move_copy, nvc0_bo_move_init },
                {  "COPY", 4, 0xb0b5, nve0_bo_move_copy, nve0_bo_move_init },
                {  "GRCE", 0, 0xb0b5, nve0_bo_move_copy, nvc0_bo_move_init },
                {  "COPY", 4, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init },
                {  "GRCE", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init },
                { "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init },
                { "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init },
                {  "COPY", 0, 0x85b5, nva3_bo_move_copy, nv50_bo_move_init },
                { "CRYPT", 0, 0x74c1, nv84_bo_move_exec, nv50_bo_move_init },
                {  "M2MF", 0, 0x9039, nvc0_bo_move_m2mf, nvc0_bo_move_init },
                {  "M2MF", 0, 0x5039, nv50_bo_move_m2mf, nv50_bo_move_init },
                {  "M2MF", 0, 0x0039, nv04_bo_move_m2mf, nv04_bo_move_init },
                {},
                { "CRYPT", 0, 0x88b4, nv98_bo_move_exec, nv50_bo_move_init },
        }, *mthd = _methods;
        const char *name = "CPU";
        int ret;

        do {
                struct nouveau_channel *chan;

                if (mthd->engine)
                        chan = drm->cechan;
                else
                        chan = drm->channel;
                if (chan == NULL)
                        continue;

                ret = nvif_object_init(&chan->user,
                                       mthd->oclass | (mthd->engine << 16),
                                       mthd->oclass, NULL, 0,
                                       &drm->ttm.copy);
                if (ret == 0) {
                        ret = mthd->init(chan, drm->ttm.copy.handle);
                        if (ret) {
                                nvif_object_fini(&drm->ttm.copy);
                                continue;
                        }

                        drm->ttm.move = mthd->exec;
                        drm->ttm.chan = chan;
                        name = mthd->name;
                        break;
                }
        } while ((++mthd)->exec);

        NV_INFO(drm, "MM: using %s for buffer copies\n", name);
}

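/* VRAM <-> system moves can't be done directly, so bounce through GART:
 * flipd allocates a TT placement, binds it, blits VRAM into it, then lets
 * TTM finish the move to system memory; flips below is the mirror image
 * for system -> VRAM.
 */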
static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_gpu, struct ttm_mem_reg *new_reg)
{
	struct ttm_place placement_memtype = {
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING
	};
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_reg;
	int ret;

	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_reg = *new_reg;
	tmp_reg.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_reg, intr, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_tt_bind(bo->ttm, &tmp_reg);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, &tmp_reg);
	if (ret)
		goto out;

	ret = ttm_bo_move_ttm(bo, intr, no_wait_gpu, new_reg);
out:
	ttm_bo_mem_put(bo, &tmp_reg);
	return ret;
}

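/* The opposite direction: move towards VRAM. TTM first brings the pages
 * into a temporary GART placement, then the GPU copies them into the final
 * placement.
 */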
static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_gpu, struct ttm_mem_reg *new_reg)
{
	struct ttm_place placement_memtype = {
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING
	};
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_reg;
	int ret;

	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_reg = *new_reg;
	tmp_reg.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_reg, intr, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_bo_move_ttm(bo, intr, no_wait_gpu, &tmp_reg);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, new_reg);
	if (ret)
		goto out;

out:
	ttm_bo_mem_put(bo, &tmp_reg);
	return ret;
}

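/* move_notify() hook: keep the BO's GPU virtual mappings coherent with its
 * backing store. If the new placement is GPU-mappable and uses the same
 * page size, remap each VMA onto the new memory; otherwise wait for idle
 * and unmap them all.
 */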
static void
nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, bool evict,
		     struct ttm_mem_reg *new_reg)
{
	struct nouveau_mem *mem = new_reg ? nouveau_mem(new_reg) : NULL;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_vma *vma;

	/* ttm can now (stupidly) pass the driver bos it didn't create... */
	if (bo->destroy != nouveau_bo_del_ttm)
		return;

	if (mem && new_reg->mem_type != TTM_PL_SYSTEM &&
	    mem->mem.page == nvbo->page) {
		list_for_each_entry(vma, &nvbo->vma_list, head) {
			nouveau_vma_map(vma, mem);
		}
	} else {
		list_for_each_entry(vma, &nvbo->vma_list, head) {
			WARN_ON(ttm_bo_wait(bo, false, false));
			nouveau_vma_unmap(vma);
		}
	}
}

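/* On pre-Tesla chipsets, VRAM tiling is configured through a fixed set of
 * tile regions rather than per-page translation; reserve one for the new
 * placement here (VRAM only).
 */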
static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_reg,
		   struct nouveau_drm_tile **new_tile)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	u64 offset = new_reg->start << PAGE_SHIFT;

	*new_tile = NULL;
	if (new_reg->mem_type != TTM_PL_VRAM)
		return 0;

	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
		*new_tile = nv10_bo_set_tiling(dev, offset, new_reg->size,
					       nvbo->mode, nvbo->zeta);
	}

	return 0;
}

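/* Release the BO's previous tile region once the buffer's exclusive fence
 * has signalled, and adopt the new one.
 */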
static void
nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
		      struct nouveau_drm_tile *new_tile,
		      struct nouveau_drm_tile **old_tile)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct dma_fence *fence = reservation_object_get_excl(bo->resv);

	nv10_bo_put_tile_region(dev, *old_tile, fence);
	*old_tile = new_tile;
}

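/* Main TTM move callback. Tries, in order: a "fake" move (system memory
 * with no populated ttm, where only the manager node changes hands), a
 * hardware-assisted copy via whichever engine nouveau_bo_move_init()
 * selected, and finally a CPU memcpy fallback.
 */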
static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
		bool no_wait_gpu, struct ttm_mem_reg *new_reg)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_mem_reg *old_reg = &bo->mem;
	struct nouveau_drm_tile *new_tile = NULL;
	int ret = 0;

	ret = ttm_bo_wait(bo, intr, no_wait_gpu);
	if (ret)
		return ret;

	if (nvbo->pin_refcnt)
		NV_WARN(drm, "Moving pinned object %p!\n", nvbo);

	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
		ret = nouveau_bo_vm_bind(bo, new_reg, &new_tile);
		if (ret)
			return ret;
	}

	/* Fake bo copy. */
	if (old_reg->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
		BUG_ON(bo->mem.mm_node != NULL);
		bo->mem = *new_reg;
		new_reg->mm_node = NULL;
		goto out;
	}

	/* Hardware assisted copy. */
	if (drm->ttm.move) {
		if (new_reg->mem_type == TTM_PL_SYSTEM)
			ret = nouveau_bo_move_flipd(bo, evict, intr,
						    no_wait_gpu, new_reg);
		else if (old_reg->mem_type == TTM_PL_SYSTEM)
			ret = nouveau_bo_move_flips(bo, evict, intr,
						    no_wait_gpu, new_reg);
		else
			ret = nouveau_bo_move_m2mf(bo, evict, intr,
						   no_wait_gpu, new_reg);
		if (!ret)
			goto out;
	}

	/* Fallback to software copy. */
	ret = ttm_bo_wait(bo, intr, no_wait_gpu);
	if (ret == 0)
		ret = ttm_bo_move_memcpy(bo, intr, no_wait_gpu, new_reg);

out:
	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
		if (ret)
			nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
		else
			nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
	}

	return ret;
}

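/* mmap permission check: defer to the DRM VMA manager, which tracks which
 * file handles have been granted access to this GEM object.
 */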
static int
nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	return drm_vma_node_verify_access(&nvbo->gem.vma_node,
					  filp->private_data);
}

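/* Tell TTM how a memory region can be CPU-mapped. Plain system memory needs
 * no setup, AGP apertures map through the AGP base, and VRAM maps through
 * BAR1. On Tesla and later, the BAR1 window is itself virtualised, so a
 * BAR1 VM allocation is needed (and handles tiled/kind memory correctly).
 */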
static int
nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
{
	struct ttm_mem_type_manager *man = &bdev->man[reg->mem_type];
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct nvkm_device *device = nvxx_device(&drm->client.device);
	struct nouveau_mem *mem = nouveau_mem(reg);
	int ret;

	reg->bus.addr = NULL;
	reg->bus.offset = 0;
	reg->bus.size = reg->num_pages << PAGE_SHIFT;
	reg->bus.base = 0;
	reg->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (reg->mem_type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		return 0;
	case TTM_PL_TT:
#if IS_ENABLED(CONFIG_AGP)
		if (drm->agp.bridge) {
			reg->bus.offset = reg->start << PAGE_SHIFT;
			reg->bus.base = drm->agp.base;
			reg->bus.is_iomem = !drm->agp.cma;
		}
#endif
		if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA || !mem->kind)
			/* untiled */
			break;
		/* fallthrough, tiled memory */
	case TTM_PL_VRAM:
		reg->bus.offset = reg->start << PAGE_SHIFT;
		reg->bus.base = device->func->resource_addr(device, 1);
		reg->bus.is_iomem = true;
		if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
			struct nvkm_vmm *bar = nvkm_bar_bar1_vmm(device);
			int page_shift = 12;
			if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_FERMI)
				page_shift = mem->mem.page;

			ret = nvkm_vm_get(bar, mem->_mem->size << 12,
					  page_shift, NV_MEM_ACCESS_RW,
					  &mem->bar_vma);
			if (ret)
				return ret;

			nvkm_vm_map(&mem->bar_vma, mem->_mem);
			reg->bus.offset = mem->bar_vma.offset;
		}
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

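/* Undo io_mem_reserve(): drop the BAR1 virtual mapping, if one was made. */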
static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
{
	struct nouveau_mem *mem = nouveau_mem(reg);

	if (!mem->bar_vma.node)
		return;

	nvkm_vm_put(&mem->bar_vma);
}

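/* Called when a CPU fault on the BO is about to be serviced. Tiled buffers
 * faulting from system memory are first migrated into GART. On pre-Tesla
 * hardware, VRAM buffers must also lie inside the CPU-mappable part of
 * BAR1; if one doesn't, clamp its placement to the mappable window and
 * revalidate.
 */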
static int
nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nvkm_device *device = nvxx_device(&drm->client.device);
	u32 mappable = device->func->resource_size(device, 1) >> PAGE_SHIFT;
	int i, ret;

	/* as long as the bo isn't in vram, and isn't tiled, we've got
	 * nothing to do here.
	 */
	if (bo->mem.mem_type != TTM_PL_VRAM) {
		if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA ||
		    !nvbo->kind)
			return 0;

		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
			nouveau_bo_placement_set(nvbo, TTM_PL_TT, 0);

			ret = nouveau_bo_validate(nvbo, false, false);
			if (ret)
				return ret;
		}
		return 0;
	}

	/* make sure bo is in mappable vram */
	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA ||
	    bo->mem.start + bo->mem.num_pages < mappable)
		return 0;

	for (i = 0; i < nvbo->placement.num_placement; ++i) {
		nvbo->placements[i].fpfn = 0;
		nvbo->placements[i].lpfn = mappable;
	}

	for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
		nvbo->busy_placements[i].fpfn = 0;
		nvbo->busy_placements[i].lpfn = mappable;
	}

	nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
	return nouveau_bo_validate(nvbo, false, false);
}

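/* Allocate and DMA-map the backing pages of a ttm_tt. SG (prime-imported)
 * buffers reuse the exporter's pages; AGP and SWIOTLB configurations defer
 * to the matching TTM pool helpers; otherwise pages come from the regular
 * pool and are mapped with dma_map_page(), unwinding fully on failure.
 */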
static int
nouveau_ttm_tt_populate(struct ttm_tt *ttm)
{
	struct ttm_dma_tt *ttm_dma = (void *)ttm;
	struct nouveau_drm *drm;
	struct device *dev;
	unsigned i;
	int r;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (ttm->state != tt_unpopulated)
		return 0;

	if (slave && ttm->sg) {
		/* make userspace faulting work */
		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
						 ttm_dma->dma_address, ttm->num_pages);
		ttm->state = tt_unbound;
		return 0;
	}

	drm = nouveau_bdev(ttm->bdev);
	dev = drm->dev->dev;

#if IS_ENABLED(CONFIG_AGP)
	if (drm->agp.bridge) {
		return ttm_agp_tt_populate(ttm);
	}
#endif

#if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
	if (swiotlb_nr_tbl()) {
		return ttm_dma_populate((void *)ttm, dev);
	}
#endif

	r = ttm_pool_populate(ttm);
	if (r) {
		return r;
	}

	for (i = 0; i < ttm->num_pages; i++) {
		dma_addr_t addr;

		addr = dma_map_page(dev, ttm->pages[i], 0, PAGE_SIZE,
				    DMA_BIDIRECTIONAL);

		if (dma_mapping_error(dev, addr)) {
			while (i--) {
				dma_unmap_page(dev, ttm_dma->dma_address[i],
					       PAGE_SIZE, DMA_BIDIRECTIONAL);
				ttm_dma->dma_address[i] = 0;
			}
			ttm_pool_unpopulate(ttm);
			return -EFAULT;
		}

		ttm_dma->dma_address[i] = addr;
	}
	return 0;
}

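/* Inverse of nouveau_ttm_tt_populate(): unmap and release the backing
 * pages. SG buffers are owned by their exporter, so nothing to do there.
 */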
static void
nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	struct ttm_dma_tt *ttm_dma = (void *)ttm;
	struct nouveau_drm *drm;
	struct device *dev;
	unsigned i;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (slave)
		return;

	drm = nouveau_bdev(ttm->bdev);
	dev = drm->dev->dev;

#if IS_ENABLED(CONFIG_AGP)
	if (drm->agp.bridge) {
		ttm_agp_tt_unpopulate(ttm);
		return;
	}
#endif

#if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
	if (swiotlb_nr_tbl()) {
		ttm_dma_unpopulate((void *)ttm, dev);
		return;
	}
#endif

	for (i = 0; i < ttm->num_pages; i++) {
		if (ttm_dma->dma_address[i]) {
			dma_unmap_page(dev, ttm_dma->dma_address[i], PAGE_SIZE,
				       DMA_BIDIRECTIONAL);
		}
	}

	ttm_pool_unpopulate(ttm);
}

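/* Attach a fence to the BO's reservation object. Writes take the exclusive
 * slot (later readers must wait), reads take a shared slot; callers such as
 * the pushbuf validation path pass exclusive for write access.
 */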
void
nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence, bool exclusive)
{
	struct reservation_object *resv = nvbo->bo.resv;

	if (exclusive)
		reservation_object_add_excl_fence(resv, &fence->base);
	else if (fence)
		reservation_object_add_shared_fence(resv, &fence->base);
}

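/* TTM driver vtable wiring the helpers above into the TTM core. */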
struct ttm_bo_driver nouveau_bo_driver = {
	.ttm_tt_create = &nouveau_ttm_tt_create,
	.ttm_tt_populate = &nouveau_ttm_tt_populate,
	.ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
	.invalidate_caches = nouveau_bo_invalidate_caches,
	.init_mem_type = nouveau_bo_init_mem_type,
	.eviction_valuable = ttm_bo_eviction_valuable,
	.evict_flags = nouveau_bo_evict_flags,
	.move_notify = nouveau_bo_move_ntfy,
	.move = nouveau_bo_move,
	.verify_access = nouveau_bo_verify_access,
	.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
	.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
	.io_mem_free = &nouveau_ttm_io_mem_free,
	.io_mem_pfn = ttm_bo_default_io_mem_pfn,
};