/*
 * Copyright 2007 Dave Airlied
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *	    Ben Skeggs   <darktama@iinet.net.au>
 *	    Jeremy Kolb  <jkolb@brandeis.edu>
 */

#include <linux/dma-mapping.h>
#include <linux/swiotlb.h>

#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"

#include "nouveau_bo.h"
#include "nouveau_ttm.h"
#include "nouveau_gem.h"
#include "nouveau_mem.h"
#include "nouveau_vmm.h"

#include <nvif/class.h>
#include <nvif/if500b.h>
#include <nvif/if900b.h>

/*
 * NV10-NV40 tiling helpers
 */

static void
nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
			   u32 addr, u32 size, u32 pitch, u32 flags)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	int i = reg - drm->tile.reg;
	struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
	struct nvkm_fb_tile *tile = &fb->tile.region[i];

	nouveau_fence_unref(&reg->fence);

	if (tile->pitch)
		nvkm_fb_tile_fini(fb, i, tile);

	if (pitch)
		nvkm_fb_tile_init(fb, i, addr, size, pitch, flags, tile);

	nvkm_fb_tile_prog(fb, i, tile);
}

static struct nouveau_drm_tile *
nv10_bo_get_tile_region(struct drm_device *dev, int i)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_drm_tile *tile = &drm->tile.reg[i];

	spin_lock(&drm->tile.lock);

	if (!tile->used &&
	    (!tile->fence || nouveau_fence_done(tile->fence)))
		tile->used = true;
	else
		tile = NULL;

	spin_unlock(&drm->tile.lock);
	return tile;
}

static void
nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
			struct dma_fence *fence)
{
	struct nouveau_drm *drm = nouveau_drm(dev);

	if (tile) {
		spin_lock(&drm->tile.lock);
		tile->fence = (struct nouveau_fence *)dma_fence_get(fence);
		tile->used = false;
		spin_unlock(&drm->tile.lock);
	}
}

static struct nouveau_drm_tile *
nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
		   u32 size, u32 pitch, u32 zeta)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
	struct nouveau_drm_tile *tile, *found = NULL;
	int i;

	for (i = 0; i < fb->tile.regions; i++) {
		tile = nv10_bo_get_tile_region(dev, i);

		if (pitch && !found) {
			found = tile;
			continue;

		} else if (tile && fb->tile.region[i].pitch) {
			/* Kill an unused tile region. */
			nv10_bo_update_tile_region(dev, tile, 0, 0, 0, 0);
		}

		nv10_bo_put_tile_region(dev, tile, NULL);
	}

	if (found)
		nv10_bo_update_tile_region(dev, found, addr, size, pitch, zeta);
	return found;
}

static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	if (unlikely(nvbo->gem.filp))
		DRM_ERROR("bo %p still attached to GEM object\n", bo);
	WARN_ON(nvbo->pin_refcnt > 0);
	nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
	kfree(nvbo);
}

static inline u64
roundup_64(u64 x, u32 y)
{
	x += y - 1;
	do_div(x, y);
	return x * y;
}

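/*
 * Illustrative note (editorial, not from the original file): do_div()
 * divides the 64-bit value in place, leaving the quotient in x, and is
 * used instead of a plain '/' so the 64-bit division also builds on
 * 32-bit platforms.  E.g. roundup_64(5000, 4096): x becomes 9095,
 * do_div() leaves 2 in x, and the result is 2 * 4096 = 8192, i.e. 5000
 * rounded up to the next multiple of 4096.
 */
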
static void
nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
		       int *align, u64 *size)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct nvif_device *device = &drm->client.device;

	if (device->info.family < NV_DEVICE_INFO_V0_TESLA) {
		if (nvbo->mode) {
			if (device->info.chipset >= 0x40) {
				*align = 65536;
				*size = roundup_64(*size, 64 * nvbo->mode);

			} else if (device->info.chipset >= 0x30) {
				*align = 32768;
				*size = roundup_64(*size, 64 * nvbo->mode);

			} else if (device->info.chipset >= 0x20) {
				*align = 16384;
				*size = roundup_64(*size, 64 * nvbo->mode);

			} else if (device->info.chipset >= 0x10) {
				*align = 16384;
				*size = roundup_64(*size, 32 * nvbo->mode);
			}
		}
	} else {
		*size = roundup_64(*size, (1 << nvbo->page));
		*align = max((1 << nvbo->page), *align);
	}

	*size = roundup_64(*size, PAGE_SIZE);
}

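/*
 * Worked example (illustrative, not part of the original file): on a
 * pre-Tesla chipset >= 0x40 with tile mode 4, a 100000-byte request
 * yields *align = 65536 and *size rounded first to a multiple of
 * 64 * 4 = 256 bytes (100096), then to a multiple of PAGE_SIZE
 * (102400 with 4KiB pages).
 */
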
int
nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
	       uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
	       struct sg_table *sg, struct reservation_object *robj,
	       struct nouveau_bo **pnvbo)
{
	struct nouveau_drm *drm = cli->drm;
	struct nouveau_bo *nvbo;
	struct nvif_mmu *mmu = &cli->mmu;
	struct nvif_vmm *vmm = &cli->vmm.vmm;
	size_t acc_size;
	int type = ttm_bo_type_device;
	int ret, i, pi = -1;

	if (!size) {
		NV_WARN(drm, "skipped size %016llx\n", size);
		return -EINVAL;
	}

	if (sg)
		type = ttm_bo_type_sg;

	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
	if (!nvbo)
		return -ENOMEM;
	INIT_LIST_HEAD(&nvbo->head);
	INIT_LIST_HEAD(&nvbo->entry);
	INIT_LIST_HEAD(&nvbo->vma_list);
	nvbo->bo.bdev = &drm->ttm.bdev;
	nvbo->cli = cli;

	/* This is confusing, and doesn't actually mean we want an uncached
	 * mapping, but is what NOUVEAU_GEM_DOMAIN_COHERENT gets translated
	 * into in nouveau_gem_new().
	 */
	if (flags & TTM_PL_FLAG_UNCACHED) {
		/* Determine if we can get a cache-coherent map, forcing
		 * uncached mapping if we can't.
		 */
		if (mmu->type[drm->ttm.type_host].type & NVIF_MEM_UNCACHED)
			nvbo->force_coherent = true;
	}

	if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI) {
		nvbo->kind = (tile_flags & 0x0000ff00) >> 8;
		if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
			kfree(nvbo);
			return -EINVAL;
		}

		nvbo->comp = mmu->kind[nvbo->kind] != nvbo->kind;
	} else
	if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
		nvbo->kind = (tile_flags & 0x00007f00) >> 8;
		nvbo->comp = (tile_flags & 0x00030000) >> 16;
		if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
			kfree(nvbo);
			return -EINVAL;
		}
	} else {
		nvbo->zeta = (tile_flags & 0x00000007);
	}
	nvbo->mode = tile_mode;
	nvbo->contig = !(tile_flags & NOUVEAU_GEM_TILE_NONCONTIG);

	/* Determine the desirable target GPU page size for the buffer. */
	for (i = 0; i < vmm->page_nr; i++) {
		/* Because we cannot currently allow VMM maps to fail
		 * during buffer migration, we need to determine page
		 * size for the buffer up-front, and pre-allocate its
		 * page tables.
		 *
		 * Skip page sizes that can't support needed domains.
		 */
		if (cli->device.info.family > NV_DEVICE_INFO_V0_CURIE &&
		    (flags & TTM_PL_FLAG_VRAM) && !vmm->page[i].vram)
			continue;
		if ((flags & TTM_PL_FLAG_TT) && !vmm->page[i].host)
			continue;

		/* Select this page size if it's the first that supports
		 * the potential memory domains, or when it's compatible
		 * with the requested compression settings.
		 */
		if (pi < 0 || !nvbo->comp || vmm->page[i].comp)
			pi = i;

		/* Stop once the buffer is larger than the current page size. */
		if (size >= 1ULL << vmm->page[i].shift)
			break;
	}

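	/* Editorial note, based on the assumption that vmm->page[] is
	 * ordered from largest to smallest page size: a 1MiB VRAM buffer
	 * would skip a 2MiB entry (the buffer is smaller than the page),
	 * record a 64KiB entry in pi, and then break out of the loop on
	 * the "size >= page size" test above.
	 */
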
	if (WARN_ON(pi < 0)) {
		kfree(nvbo);
		return -EINVAL;
	}

	/* Disable compression if suitable settings couldn't be found. */
	if (nvbo->comp && !vmm->page[pi].comp) {
		if (mmu->object.oclass >= NVIF_CLASS_MMU_GF100)
			nvbo->kind = mmu->kind[nvbo->kind];
		nvbo->comp = 0;
	}
	nvbo->page = vmm->page[pi].shift;

	nouveau_bo_fixup_align(nvbo, flags, &align, &size);
	nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
	nouveau_bo_placement_set(nvbo, flags, 0);

	acc_size = ttm_bo_dma_acc_size(&drm->ttm.bdev, size,
				       sizeof(struct nouveau_bo));

	ret = ttm_bo_init(&drm->ttm.bdev, &nvbo->bo, size,
			  type, &nvbo->placement,
			  align >> PAGE_SHIFT, false, NULL, acc_size, sg,
			  robj, nouveau_bo_del_ttm);
	if (ret) {
		/* ttm will call nouveau_bo_del_ttm if it fails.. */
		return ret;
	}

	*pnvbo = nvbo;
	return 0;
}

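/*
 * Illustrative call (not part of the original file; the values are made
 * up): allocating an untiled, uncompressed 64KiB buffer for VRAM might
 * look like
 *
 *	struct nouveau_bo *nvbo = NULL;
 *	ret = nouveau_bo_new(cli, 65536, 0, TTM_PL_FLAG_VRAM,
 *			     0, 0, NULL, NULL, &nvbo);
 *
 * with tile_mode and tile_flags both zero, and no sg table or external
 * reservation object.
 */
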
static void
set_placement_list(struct ttm_place *pl, unsigned *n, uint32_t type, uint32_t flags)
{
	*n = 0;

	if (type & TTM_PL_FLAG_VRAM)
		pl[(*n)++].flags = TTM_PL_FLAG_VRAM | flags;
	if (type & TTM_PL_FLAG_TT)
		pl[(*n)++].flags = TTM_PL_FLAG_TT | flags;
	if (type & TTM_PL_FLAG_SYSTEM)
		pl[(*n)++].flags = TTM_PL_FLAG_SYSTEM | flags;
}

static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	u32 vram_pages = drm->client.device.info.ram_size >> PAGE_SHIFT;
	unsigned i, fpfn, lpfn;

	if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CELSIUS &&
	    nvbo->mode && (type & TTM_PL_FLAG_VRAM) &&
	    nvbo->bo.mem.num_pages < vram_pages / 4) {
		/*
		 * Make sure that the color and depth buffers are handled
		 * by independent memory controller units. Up to a 9x
		 * speed up when alpha-blending and depth-test are enabled
		 * at the same time.
		 */
		if (nvbo->zeta) {
			fpfn = vram_pages / 2;
			lpfn = ~0;
		} else {
			fpfn = 0;
			lpfn = vram_pages / 2;
		}
		for (i = 0; i < nvbo->placement.num_placement; ++i) {
			nvbo->placements[i].fpfn = fpfn;
			nvbo->placements[i].lpfn = lpfn;
		}
		for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
			nvbo->busy_placements[i].fpfn = fpfn;
			nvbo->busy_placements[i].lpfn = lpfn;
		}
	}
}

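/*
 * Illustrative note (not part of the original file): with 256MiB of VRAM
 * (65536 4KiB pages), the function above constrains a small zeta (depth)
 * buffer to the upper half of VRAM (fpfn = 32768) and a small colour
 * buffer to the lower half (lpfn = 32768), keeping the two on separate
 * memory controller units.
 */
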
void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
{
	struct ttm_placement *pl = &nvbo->placement;
	uint32_t flags = (nvbo->force_coherent ? TTM_PL_FLAG_UNCACHED :
						 TTM_PL_MASK_CACHING) |
			 (nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);

	pl->placement = nvbo->placements;
	set_placement_list(nvbo->placements, &pl->num_placement,
			   type, flags);

	pl->busy_placement = nvbo->busy_placements;
	set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
			   type | busy, flags);

	set_placement_range(nvbo, type);
}

int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype, bool contig)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	bool force = false, evict = false;
	int ret;

	ret = ttm_bo_reserve(bo, false, false, NULL);
	if (ret)
		return ret;

	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
	    memtype == TTM_PL_FLAG_VRAM && contig) {
		if (!nvbo->contig) {
			nvbo->contig = true;
			force = true;
			evict = true;
		}
	}

	if (nvbo->pin_refcnt) {
		if (!(memtype & (1 << bo->mem.mem_type)) || evict) {
			NV_ERROR(drm, "bo %p pinned elsewhere: "
				      "0x%08x vs 0x%08x\n", bo,
				 1 << bo->mem.mem_type, memtype);
			ret = -EBUSY;
		}
		nvbo->pin_refcnt++;
		goto out;
	}

	if (evict) {
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT, 0);
		ret = nouveau_bo_validate(nvbo, false, false);
		if (ret)
			goto out;
	}

	nvbo->pin_refcnt++;
	nouveau_bo_placement_set(nvbo, memtype, 0);

	/* drop pin_refcnt temporarily, so we don't trip the assertion
	 * in nouveau_bo_move() that makes sure we're not trying to
	 * move a pinned buffer
	 */
	nvbo->pin_refcnt--;
	ret = nouveau_bo_validate(nvbo, false, false);
	if (ret)
		goto out;
	nvbo->pin_refcnt++;

	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		drm->gem.vram_available -= bo->mem.size;
		break;
	case TTM_PL_TT:
		drm->gem.gart_available -= bo->mem.size;
		break;
	default:
		break;
	}

out:
	if (force && ret)
		nvbo->contig = false;
	ttm_bo_unreserve(bo);
	return ret;
}

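/*
 * Illustrative pairing (not part of the original file): a scanout buffer
 * would typically be pinned with
 *
 *	ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, true);
 *
 * (contig == true, as display scanout needs contiguous VRAM) and later
 * released with nouveau_bo_unpin(nvbo).
 */
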
int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret, ref;

	ret = ttm_bo_reserve(bo, false, false, NULL);
	if (ret)
		return ret;

	ref = --nvbo->pin_refcnt;
	WARN_ON_ONCE(ref < 0);
	if (ref)
		goto out;

	nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);

	ret = nouveau_bo_validate(nvbo, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			drm->gem.vram_available += bo->mem.size;
			break;
		case TTM_PL_TT:
			drm->gem.gart_available += bo->mem.size;
			break;
		default:
			break;
		}
	}

out:
	ttm_bo_unreserve(bo);
	return ret;
}

int
nouveau_bo_map(struct nouveau_bo *nvbo)
{
	int ret;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
	if (ret)
		return ret;

	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);

	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}

void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
	if (!nvbo)
		return;

	ttm_bo_kunmap(&nvbo->kmap);
}

void
nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
	int i;

	if (!ttm_dma)
		return;

	/* Don't waste time looping if the object is coherent */
	if (nvbo->force_coherent)
		return;

	for (i = 0; i < ttm_dma->ttm.num_pages; i++)
		dma_sync_single_for_device(drm->dev->dev,
					   ttm_dma->dma_address[i],
					   PAGE_SIZE, DMA_TO_DEVICE);
}

void
nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
	int i;

	if (!ttm_dma)
		return;

	/* Don't waste time looping if the object is coherent */
	if (nvbo->force_coherent)
		return;

	for (i = 0; i < ttm_dma->ttm.num_pages; i++)
		dma_sync_single_for_cpu(drm->dev->dev, ttm_dma->dma_address[i],
					PAGE_SIZE, DMA_FROM_DEVICE);
}

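/*
 * Illustrative pattern (not part of the original file): CPU access to a
 * non-coherent, DMA-mapped object is bracketed by the two helpers above,
 *
 *	nouveau_bo_sync_for_cpu(nvbo);
 *	... CPU reads/writes through the kmap ...
 *	nouveau_bo_sync_for_device(nvbo);
 *
 * mirroring the usual dma_sync_single_for_{cpu,device}() ownership rules.
 */
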
int
nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
		    bool no_wait_gpu)
{
	int ret;

	ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
			      interruptible, no_wait_gpu);
	if (ret)
		return ret;

	nouveau_bo_sync_for_device(nvbo);

	return 0;
}

void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

	mem += index;

	if (is_iomem)
		iowrite16_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

u32
nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

	mem += index;

	if (is_iomem)
		return ioread32_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

	mem += index;

	if (is_iomem)
		iowrite32_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

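/*
 * Illustrative usage of the accessors above (not part of the original
 * file): after a successful nouveau_bo_map(nvbo), dword i of the buffer
 * can be written with nouveau_bo_wr32(nvbo, i, val) without caring
 * whether ttm_bo_kmap() produced an ioremapped or a cached mapping, and
 * the mapping is dropped again with nouveau_bo_unmap(nvbo).
 */
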
static struct ttm_tt *
nouveau_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
		      uint32_t page_flags, struct page *dummy_read)
{
#if IS_ENABLED(CONFIG_AGP)
	struct nouveau_drm *drm = nouveau_bdev(bdev);

	if (drm->agp.bridge) {
		return ttm_agp_tt_create(bdev, drm->agp.bridge, size,
					 page_flags, dummy_read);
	}
#endif

	return nouveau_sgdma_create_ttm(bdev, size, page_flags, dummy_read);
}

static int
nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	/* We'll do this from user space. */
	return 0;
}

static int
nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			 struct ttm_mem_type_manager *man)
{
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct nvif_mmu *mmu = &drm->client.mmu;

	switch (type) {
	case TTM_PL_SYSTEM:
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED |
					 TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;

		if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
			/* Some BARs do not support being ioremapped WC */
			const u8 type = mmu->type[drm->ttm.type_vram].type;
			if (type & NVIF_MEM_UNCACHED) {
				man->available_caching = TTM_PL_FLAG_UNCACHED;
				man->default_caching = TTM_PL_FLAG_UNCACHED;
			}

			man->func = &nouveau_vram_manager;
			man->io_reserve_fastpath = false;
			man->use_io_reserve_lru = true;
		} else {
			man->func = &ttm_bo_manager_func;
		}
		break;
	case TTM_PL_TT:
		if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA)
			man->func = &nouveau_gart_manager;
		else
		if (!drm->agp.bridge)
			man->func = &nv04_gart_manager;
		else
			man->func = &ttm_bo_manager_func;

		if (drm->agp.bridge) {
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
			man->available_caching = TTM_PL_FLAG_UNCACHED |
						 TTM_PL_FLAG_WC;
			man->default_caching = TTM_PL_FLAG_WC;
		} else {
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
				     TTM_MEMTYPE_FLAG_CMA;
			man->available_caching = TTM_PL_MASK_CACHING;
			man->default_caching = TTM_PL_FLAG_CACHED;
		}

		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
					 TTM_PL_FLAG_SYSTEM);
		break;
	default:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
		break;
	}

	*pl = nvbo->placement;
}


static int
nve0_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 2);
	if (ret == 0) {
		BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
		OUT_RING  (chan, handle & 0x0000ffff);
		FIRE_RING (chan);
	}
	return ret;
}

static int
nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
{
	struct nouveau_mem *mem = nouveau_mem(old_reg);
	int ret = RING_SPACE(chan, 10);
	if (ret == 0) {
		BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8);
		OUT_RING  (chan, upper_32_bits(mem->vma[0].addr));
		OUT_RING  (chan, lower_32_bits(mem->vma[0].addr));
		OUT_RING  (chan, upper_32_bits(mem->vma[1].addr));
		OUT_RING  (chan, lower_32_bits(mem->vma[1].addr));
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, new_reg->num_pages);
		BEGIN_IMC0(chan, NvSubCopy, 0x0300, 0x0386);
	}
	return ret;
}

static int
nvc0_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 2);
	if (ret == 0) {
		BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
		OUT_RING  (chan, handle);
	}
	return ret;
}

static int
nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
{
	struct nouveau_mem *mem = nouveau_mem(old_reg);
	u64 src_offset = mem->vma[0].addr;
	u64 dst_offset = mem->vma[1].addr;
	u32 page_count = new_reg->num_pages;
	int ret;

	page_count = new_reg->num_pages;
	while (page_count) {
		int line_count = (page_count > 8191) ? 8191 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_NVC0(chan, NvSubCopy, 0x030c, 8);
		OUT_RING  (chan, upper_32_bits(src_offset));
		OUT_RING  (chan, lower_32_bits(src_offset));
		OUT_RING  (chan, upper_32_bits(dst_offset));
		OUT_RING  (chan, lower_32_bits(dst_offset));
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, line_count);
		BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
		OUT_RING  (chan, 0x00000110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

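/*
 * Illustrative arithmetic (not part of the original file): the loop
 * above submits at most 8191 lines of PAGE_SIZE bytes per pass, so with
 * 4KiB pages a 64MiB move (16384 pages) takes ceil(16384 / 8191) = 3
 * ring submissions.
 */
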
static int
nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
{
	struct nouveau_mem *mem = nouveau_mem(old_reg);
	u64 src_offset = mem->vma[0].addr;
	u64 dst_offset = mem->vma[1].addr;
	u32 page_count = new_reg->num_pages;
	int ret;

	page_count = new_reg->num_pages;
	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 12);
		if (ret)
			return ret;

		BEGIN_NVC0(chan, NvSubCopy, 0x0238, 2);
		OUT_RING  (chan, upper_32_bits(dst_offset));
		OUT_RING  (chan, lower_32_bits(dst_offset));
		BEGIN_NVC0(chan, NvSubCopy, 0x030c, 6);
		OUT_RING  (chan, upper_32_bits(src_offset));
		OUT_RING  (chan, lower_32_bits(src_offset));
		OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* line_length */
		OUT_RING  (chan, line_count);
		BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
		OUT_RING  (chan, 0x00100110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

static int
nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
{
	struct nouveau_mem *mem = nouveau_mem(old_reg);
	u64 src_offset = mem->vma[0].addr;
	u64 dst_offset = mem->vma[1].addr;
	u32 page_count = new_reg->num_pages;
	int ret;

	page_count = new_reg->num_pages;
	while (page_count) {
		int line_count = (page_count > 8191) ? 8191 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
		OUT_RING  (chan, upper_32_bits(src_offset));
		OUT_RING  (chan, lower_32_bits(src_offset));
		OUT_RING  (chan, upper_32_bits(dst_offset));
		OUT_RING  (chan, lower_32_bits(dst_offset));
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, line_count);
		BEGIN_NV04(chan, NvSubCopy, 0x0300, 1);
		OUT_RING  (chan, 0x00000110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

static int
nv98_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
{
	struct nouveau_mem *mem = nouveau_mem(old_reg);
	int ret = RING_SPACE(chan, 7);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0320, 6);
		OUT_RING  (chan, upper_32_bits(mem->vma[0].addr));
		OUT_RING  (chan, lower_32_bits(mem->vma[0].addr));
		OUT_RING  (chan, upper_32_bits(mem->vma[1].addr));
		OUT_RING  (chan, lower_32_bits(mem->vma[1].addr));
		OUT_RING  (chan, 0x00000000 /* COPY */);
		OUT_RING  (chan, new_reg->num_pages << PAGE_SHIFT);
	}
	return ret;
}

static int
nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
{
	struct nouveau_mem *mem = nouveau_mem(old_reg);
	int ret = RING_SPACE(chan, 7);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0304, 6);
		OUT_RING  (chan, new_reg->num_pages << PAGE_SHIFT);
		OUT_RING  (chan, upper_32_bits(mem->vma[0].addr));
		OUT_RING  (chan, lower_32_bits(mem->vma[0].addr));
		OUT_RING  (chan, upper_32_bits(mem->vma[1].addr));
		OUT_RING  (chan, lower_32_bits(mem->vma[1].addr));
		OUT_RING  (chan, 0x00000000 /* MODE_COPY, QUERY_NONE */);
	}
	return ret;
}

static int
nv50_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 6);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
		OUT_RING  (chan, handle);
		BEGIN_NV04(chan, NvSubCopy, 0x0180, 3);
		OUT_RING  (chan, chan->drm->ntfy.handle);
		OUT_RING  (chan, chan->vram.handle);
		OUT_RING  (chan, chan->vram.handle);
	}

	return ret;
}

static int
nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
{
	struct nouveau_mem *mem = nouveau_mem(old_reg);
	u64 length = (new_reg->num_pages << PAGE_SHIFT);
	u64 src_offset = mem->vma[0].addr;
	u64 dst_offset = mem->vma[1].addr;
	int src_tiled = !!mem->kind;
	int dst_tiled = !!nouveau_mem(new_reg)->kind;
	int ret;

	while (length) {
		u32 amount, stride, height;

		ret = RING_SPACE(chan, 18 + 6 * (src_tiled + dst_tiled));
		if (ret)
			return ret;

		amount  = min(length, (u64)(4 * 1024 * 1024));
		stride  = 16 * 4;
		height  = amount / stride;

		if (src_tiled) {
			BEGIN_NV04(chan, NvSubCopy, 0x0200, 7);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, stride);
			OUT_RING  (chan, height);
			OUT_RING  (chan, 1);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
		} else {
			BEGIN_NV04(chan, NvSubCopy, 0x0200, 1);
			OUT_RING  (chan, 1);
		}
		if (dst_tiled) {
			BEGIN_NV04(chan, NvSubCopy, 0x021c, 7);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, stride);
			OUT_RING  (chan, height);
			OUT_RING  (chan, 1);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
		} else {
			BEGIN_NV04(chan, NvSubCopy, 0x021c, 1);
			OUT_RING  (chan, 1);
		}

		BEGIN_NV04(chan, NvSubCopy, 0x0238, 2);
		OUT_RING  (chan, upper_32_bits(src_offset));
		OUT_RING  (chan, upper_32_bits(dst_offset));
		BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
		OUT_RING  (chan, lower_32_bits(src_offset));
		OUT_RING  (chan, lower_32_bits(dst_offset));
		OUT_RING  (chan, stride);
		OUT_RING  (chan, stride);
		OUT_RING  (chan, stride);
		OUT_RING  (chan, height);
		OUT_RING  (chan, 0x00000101);
		OUT_RING  (chan, 0x00000000);
		BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING  (chan, 0);

		length -= amount;
		src_offset += amount;
		dst_offset += amount;
	}

	return 0;
}

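/*
 * Illustrative arithmetic (not part of the original file): each pass of
 * the loop above moves at most 4MiB, described to M2MF as 'height' lines
 * of 'stride' = 64 bytes, i.e. up to 4MiB / 64 = 65536 lines per pass;
 * a tiled source or destination costs six extra dwords of ring space for
 * its surface description, matching the RING_SPACE() calculation.
 */
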
static int
nv04_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 4);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
		OUT_RING  (chan, handle);
		BEGIN_NV04(chan, NvSubCopy, 0x0180, 1);
		OUT_RING  (chan, chan->drm->ntfy.handle);
	}

	return ret;
}

static inline uint32_t
nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
		      struct nouveau_channel *chan, struct ttm_mem_reg *reg)
{
	if (reg->mem_type == TTM_PL_TT)
		return NvDmaTT;
	return chan->vram.handle;
}

static int
nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
{
	u32 src_offset = old_reg->start << PAGE_SHIFT;
	u32 dst_offset = new_reg->start << PAGE_SHIFT;
	u32 page_count = new_reg->num_pages;
	int ret;

	ret = RING_SPACE(chan, 3);
	if (ret)
		return ret;

	BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, old_reg));
	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, new_reg));

	page_count = new_reg->num_pages;
	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_NV04(chan, NvSubCopy,
			   NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
		OUT_RING  (chan, src_offset);
		OUT_RING  (chan, dst_offset);
		OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* line_length */
		OUT_RING  (chan, line_count);
		OUT_RING  (chan, 0x00000101);
		OUT_RING  (chan, 0x00000000);
		BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING  (chan, 0);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

static int
nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo,
		     struct ttm_mem_reg *reg)
{
	struct nouveau_mem *old_mem = nouveau_mem(&bo->mem);
	struct nouveau_mem *new_mem = nouveau_mem(reg);
	struct nvif_vmm *vmm = &drm->client.vmm.vmm;
	int ret;

	ret = nvif_vmm_get(vmm, LAZY, false, old_mem->mem.page, 0,
			   old_mem->mem.size, &old_mem->vma[0]);
	if (ret)
		return ret;

	ret = nvif_vmm_get(vmm, LAZY, false, new_mem->mem.page, 0,
			   new_mem->mem.size, &old_mem->vma[1]);
	if (ret)
		goto done;

	ret = nouveau_mem_map(old_mem, vmm, &old_mem->vma[0]);
	if (ret)
		goto done;

	ret = nouveau_mem_map(new_mem, vmm, &old_mem->vma[1]);
done:
	if (ret) {
		nvif_vmm_put(vmm, &old_mem->vma[1]);
		nvif_vmm_put(vmm, &old_mem->vma[0]);
	}
	return 0;
}

static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
		     bool no_wait_gpu, struct ttm_mem_reg *new_reg)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_channel *chan = drm->ttm.chan;
	struct nouveau_cli *cli = (void *)chan->user.client;
	struct nouveau_fence *fence;
	int ret;

	/* create temporary vmas for the transfer and attach them to the
	 * old nvkm_mem node, these will get cleaned up after ttm has
	 * destroyed the ttm_mem_reg
	 */
	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
		ret = nouveau_bo_move_prep(drm, bo, new_reg);
		if (ret)
			return ret;
	}

	mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING);
	ret = nouveau_fence_sync(nouveau_bo(bo), chan, true, intr);
	if (ret == 0) {
		ret = drm->ttm.move(chan, bo, &bo->mem, new_reg);
		if (ret == 0) {
			ret = nouveau_fence_new(chan, false, &fence);
			if (ret == 0) {
				ret = ttm_bo_move_accel_cleanup(bo,
								&fence->base,
								evict,
								new_reg);
				nouveau_fence_unref(&fence);
			}
		}
	}
	mutex_unlock(&cli->mutex);
	return ret;
}

void
nouveau_bo_move_init(struct nouveau_drm *drm)
{
	static const struct {
		const char *name;
		int engine;
		s32 oclass;
		int (*exec)(struct nouveau_channel *,
			    struct ttm_buffer_object *,
			    struct ttm_mem_reg *, struct ttm_mem_reg *);
		int (*init)(struct nouveau_channel *, u32 handle);
	} _methods[] = {
		{  "COPY", 4, 0xc1b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xc1b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 4, 0xc0b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xc0b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 4, 0xb0b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xb0b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 4, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{ "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init },
		{ "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 0, 0x85b5, nva3_bo_move_copy, nv50_bo_move_init },
		{ "CRYPT", 0, 0x74c1, nv84_bo_move_exec, nv50_bo_move_init },
		{  "M2MF", 0, 0x9039, nvc0_bo_move_m2mf, nvc0_bo_move_init },
		{  "M2MF", 0, 0x5039, nv50_bo_move_m2mf, nv50_bo_move_init },
		{  "M2MF", 0, 0x0039, nv04_bo_move_m2mf, nv04_bo_move_init },
		{},
		{ "CRYPT", 0, 0x88b4, nv98_bo_move_exec, nv50_bo_move_init },
	}, *mthd = _methods;
	const char *name = "CPU";
	int ret;

	do {
		struct nouveau_channel *chan;

		if (mthd->engine)
			chan = drm->cechan;
		else
			chan = drm->channel;
		if (chan == NULL)
			continue;

		ret = nvif_object_init(&chan->user,
				       mthd->oclass | (mthd->engine << 16),
				       mthd->oclass, NULL, 0,
				       &drm->ttm.copy);
		if (ret == 0) {
			ret = mthd->init(chan, drm->ttm.copy.handle);
			if (ret) {
				nvif_object_fini(&drm->ttm.copy);
				continue;
			}

			drm->ttm.move = mthd->exec;
			drm->ttm.chan = chan;
			name = mthd->name;
			break;
		}
	} while ((++mthd)->exec);

	NV_INFO(drm, "MM: using %s for buffer copies\n", name);
}

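/*
 * Editorial note on the table above (not from the original file): the
 * methods are probed in order and the first oclass that initializes
 * successfully wins, so dedicated copy engines are preferred over M2MF.
 * Because the do/while loop terminates at the empty {} sentinel, the
 * 0x88b4 CRYPT entry after it is never probed.
 */
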
static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_gpu, struct ttm_mem_reg *new_reg)
{
	struct ttm_place placement_memtype = {
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING
	};
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_reg;
	int ret;

	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_reg = *new_reg;
	tmp_reg.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_reg, intr, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_tt_bind(bo->ttm, &tmp_reg);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, &tmp_reg);
	if (ret)
		goto out;

	ret = ttm_bo_move_ttm(bo, intr, no_wait_gpu, new_reg);
out:
	ttm_bo_mem_put(bo, &tmp_reg);
	return ret;
}

1234static int
1235nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
Ben Skeggs605f9cc2016-05-17 11:13:37 +10001236 bool no_wait_gpu, struct ttm_mem_reg *new_reg)
Ben Skeggs6ee73862009-12-11 19:24:15 +10001237{
Christian Königf1217ed2014-08-27 13:16:04 +02001238 struct ttm_place placement_memtype = {
1239 .fpfn = 0,
1240 .lpfn = 0,
1241 .flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING
1242 };
Ben Skeggs6ee73862009-12-11 19:24:15 +10001243 struct ttm_placement placement;
Ben Skeggs605f9cc2016-05-17 11:13:37 +10001244 struct ttm_mem_reg tmp_reg;
Ben Skeggs6ee73862009-12-11 19:24:15 +10001245 int ret;
1246
Ben Skeggs6ee73862009-12-11 19:24:15 +10001247 placement.num_placement = placement.num_busy_placement = 1;
Francisco Jerez77e2b5e2009-12-16 19:05:00 +01001248 placement.placement = placement.busy_placement = &placement_memtype;
Ben Skeggs6ee73862009-12-11 19:24:15 +10001249
Ben Skeggs605f9cc2016-05-17 11:13:37 +10001250 tmp_reg = *new_reg;
1251 tmp_reg.mm_node = NULL;
1252 ret = ttm_bo_mem_space(bo, &placement, &tmp_reg, intr, no_wait_gpu);
Ben Skeggs6ee73862009-12-11 19:24:15 +10001253 if (ret)
1254 return ret;
1255
Ben Skeggs605f9cc2016-05-17 11:13:37 +10001256 ret = ttm_bo_move_ttm(bo, intr, no_wait_gpu, &tmp_reg);
Ben Skeggs6ee73862009-12-11 19:24:15 +10001257 if (ret)
1258 goto out;
1259
Ben Skeggs605f9cc2016-05-17 11:13:37 +10001260 ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, new_reg);
1263
1264out:
Ben Skeggs605f9cc2016-05-17 11:13:37 +10001265 ttm_bo_mem_put(bo, &tmp_reg);
Ben Skeggs6ee73862009-12-11 19:24:15 +10001266 return ret;
1267}
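
/*
 * nouveau_bo_move_flipd() (VRAM to system) and nouveau_bo_move_flips()
 * (system to VRAM) both bounce through a GART-visible TTM_PL_TT buffer,
 * since the copy engines can only reach memory the GPU has mapped.  The
 * placement setup the two duplicate could be shared; a sketch under that
 * assumption (nouveau_bo_tt_placement() is a hypothetical helper, not a
 * driver function):
 */
#if 0
static void
nouveau_bo_tt_placement(struct ttm_placement *placement,
			struct ttm_place *place)
{
	place->fpfn = 0;
	place->lpfn = 0;
	place->flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	placement->num_placement = placement->num_busy_placement = 1;
	placement->placement = placement->busy_placement = place;
}
#endif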
1268
Ben Skeggsa4154bb2011-02-10 10:35:16 +10001269static void
Nicolai Hähnle66257db2016-12-15 17:23:49 +01001270nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, bool evict,
Ben Skeggs605f9cc2016-05-17 11:13:37 +10001271 struct ttm_mem_reg *new_reg)
Ben Skeggsa4154bb2011-02-10 10:35:16 +10001272{
Ben Skeggs9ce523c2017-11-01 03:56:19 +10001273 struct nouveau_mem *mem = new_reg ? nouveau_mem(new_reg) : NULL;
Ben Skeggsa4154bb2011-02-10 10:35:16 +10001274 struct nouveau_bo *nvbo = nouveau_bo(bo);
Ben Skeggs24e83752017-11-01 03:56:19 +10001275 struct nouveau_vma *vma;
Ben Skeggsa4154bb2011-02-10 10:35:16 +10001276
Ben Skeggs9f1feed2012-01-25 15:34:22 +10001277 /* ttm can now (stupidly) pass the driver bos it didn't create... */
1278 if (bo->destroy != nouveau_bo_del_ttm)
1279 return;
1280
Ben Skeggsa48296a2017-11-01 03:56:19 +10001281 if (mem && new_reg->mem_type != TTM_PL_SYSTEM &&
Ben Skeggs9ce523c2017-11-01 03:56:19 +10001282 mem->mem.page == nvbo->page) {
Ben Skeggsa48296a2017-11-01 03:56:19 +10001283 list_for_each_entry(vma, &nvbo->vma_list, head) {
Ben Skeggs24e83752017-11-01 03:56:19 +10001284 nouveau_vma_map(vma, mem);
Ben Skeggsa48296a2017-11-01 03:56:19 +10001285 }
1286 } else {
1287 list_for_each_entry(vma, &nvbo->vma_list, head) {
Ben Skeggs10dcab32016-12-12 17:52:45 +10001288 WARN_ON(ttm_bo_wait(bo, false, false));
Ben Skeggs24e83752017-11-01 03:56:19 +10001289 nouveau_vma_unmap(vma);
Ben Skeggsfd2871a2011-06-06 14:07:04 +10001290 }
Ben Skeggsa4154bb2011-02-10 10:35:16 +10001291 }
1292}
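
/*
 * The branch above keeps VMAs mapped only when the new backing store is
 * still GPU-addressable with the bo's page size; otherwise it waits for
 * idle (the WARN_ON'd ttm_bo_wait()) and tears the mappings down.  The
 * predicate, written out as a hypothetical helper for clarity:
 */
#if 0
static bool
nouveau_bo_can_keep_vmas(struct nouveau_bo *nvbo, struct nouveau_mem *mem,
			 struct ttm_mem_reg *new_reg)
{
	return mem && new_reg->mem_type != TTM_PL_SYSTEM &&
	       mem->mem.page == nvbo->page;
}
#endif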
1293
Ben Skeggs6ee73862009-12-11 19:24:15 +10001294static int
Ben Skeggs605f9cc2016-05-17 11:13:37 +10001295nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_reg,
Ben Skeggsebb945a2012-07-20 08:17:34 +10001296 struct nouveau_drm_tile **new_tile)
Ben Skeggs6ee73862009-12-11 19:24:15 +10001297{
Ben Skeggsebb945a2012-07-20 08:17:34 +10001298 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
1299 struct drm_device *dev = drm->dev;
Francisco Jereza0af9ad2009-12-11 16:51:09 +01001300 struct nouveau_bo *nvbo = nouveau_bo(bo);
Ben Skeggs605f9cc2016-05-17 11:13:37 +10001301 u64 offset = new_reg->start << PAGE_SHIFT;
Ben Skeggs6ee73862009-12-11 19:24:15 +10001302
Ben Skeggsa4154bb2011-02-10 10:35:16 +10001303 *new_tile = NULL;
Ben Skeggs605f9cc2016-05-17 11:13:37 +10001304 if (new_reg->mem_type != TTM_PL_VRAM)
Francisco Jereza0af9ad2009-12-11 16:51:09 +01001305 return 0;
Ben Skeggs6ee73862009-12-11 19:24:15 +10001306
Ben Skeggs1167c6b2016-05-18 13:57:42 +10001307 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
Ben Skeggs605f9cc2016-05-17 11:13:37 +10001308 *new_tile = nv10_bo_set_tiling(dev, offset, new_reg->size,
Ben Skeggs7760a2e2017-11-01 03:56:19 +10001309 nvbo->mode, nvbo->zeta);
Ben Skeggs6ee73862009-12-11 19:24:15 +10001310 }
1311
Francisco Jereza0af9ad2009-12-11 16:51:09 +01001312 return 0;
1313}
Ben Skeggs6ee73862009-12-11 19:24:15 +10001314
Francisco Jereza0af9ad2009-12-11 16:51:09 +01001315static void
1316nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
Ben Skeggsebb945a2012-07-20 08:17:34 +10001317 struct nouveau_drm_tile *new_tile,
1318 struct nouveau_drm_tile **old_tile)
Francisco Jereza0af9ad2009-12-11 16:51:09 +01001319{
Ben Skeggsebb945a2012-07-20 08:17:34 +10001320 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
1321 struct drm_device *dev = drm->dev;
Chris Wilsonf54d1862016-10-25 13:00:45 +01001322 struct dma_fence *fence = reservation_object_get_excl(bo->resv);
Francisco Jereza0af9ad2009-12-11 16:51:09 +01001323
Maarten Lankhorstf2c24b82014-04-02 17:14:48 +02001324 nv10_bo_put_tile_region(dev, *old_tile, fence);
Ben Skeggsa4154bb2011-02-10 10:35:16 +10001325 *old_tile = new_tile;
Francisco Jereza0af9ad2009-12-11 16:51:09 +01001326}
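
/*
 * On pre-Tesla chips the tile region has to follow the buffer around
 * VRAM, so nouveau_bo_move() below brackets every move with the two
 * functions above.  The intended pairing, as a condensed sketch of the
 * calls made further down:
 */
#if 0
	struct nouveau_drm_tile *new_tile = NULL;

	ret = nouveau_bo_vm_bind(bo, new_reg, &new_tile);
	/* ... perform the actual move ... */
	if (ret)
		nouveau_bo_vm_cleanup(bo, NULL, &new_tile);	  /* drop new */
	else
		nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile); /* commit */
#endif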
1327
1328static int
1329nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
Ben Skeggs605f9cc2016-05-17 11:13:37 +10001330 bool no_wait_gpu, struct ttm_mem_reg *new_reg)
Francisco Jereza0af9ad2009-12-11 16:51:09 +01001331{
Ben Skeggsebb945a2012-07-20 08:17:34 +10001332 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
Francisco Jereza0af9ad2009-12-11 16:51:09 +01001333 struct nouveau_bo *nvbo = nouveau_bo(bo);
Ben Skeggs605f9cc2016-05-17 11:13:37 +10001334 struct ttm_mem_reg *old_reg = &bo->mem;
Ben Skeggsebb945a2012-07-20 08:17:34 +10001335 struct nouveau_drm_tile *new_tile = NULL;
Francisco Jereza0af9ad2009-12-11 16:51:09 +01001336 int ret = 0;
1337
Christian König88932a72016-06-06 10:17:53 +02001338 ret = ttm_bo_wait(bo, intr, no_wait_gpu);
1339 if (ret)
1340 return ret;
1341
Alexandre Courbot5be5a152014-10-27 18:11:52 +09001342 if (nvbo->pin_refcnt)
1343 NV_WARN(drm, "Moving pinned object %p!\n", nvbo);
1344
Ben Skeggs1167c6b2016-05-18 13:57:42 +10001345 if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
Ben Skeggs605f9cc2016-05-17 11:13:37 +10001346 ret = nouveau_bo_vm_bind(bo, new_reg, &new_tile);
Ben Skeggsa4154bb2011-02-10 10:35:16 +10001347 if (ret)
1348 return ret;
1349 }
Francisco Jereza0af9ad2009-12-11 16:51:09 +01001350
Francisco Jereza0af9ad2009-12-11 16:51:09 +01001351 /* Fake bo copy. */
Ben Skeggs605f9cc2016-05-17 11:13:37 +10001352 if (old_reg->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
Ben Skeggs6ee73862009-12-11 19:24:15 +10001353 BUG_ON(bo->mem.mm_node != NULL);
Ben Skeggs605f9cc2016-05-17 11:13:37 +10001354 bo->mem = *new_reg;
1355 new_reg->mm_node = NULL;
Francisco Jereza0af9ad2009-12-11 16:51:09 +01001356 goto out;
Ben Skeggs6ee73862009-12-11 19:24:15 +10001357 }
1358
Ben Skeggscef9e992013-11-22 10:52:54 +10001359 /* Hardware assisted copy. */
1360 if (drm->ttm.move) {
Ben Skeggs605f9cc2016-05-17 11:13:37 +10001361 if (new_reg->mem_type == TTM_PL_SYSTEM)
Ben Skeggscef9e992013-11-22 10:52:54 +10001362 ret = nouveau_bo_move_flipd(bo, evict, intr,
Ben Skeggs605f9cc2016-05-17 11:13:37 +10001363 no_wait_gpu, new_reg);
1364 else if (old_reg->mem_type == TTM_PL_SYSTEM)
Ben Skeggscef9e992013-11-22 10:52:54 +10001365 ret = nouveau_bo_move_flips(bo, evict, intr,
Ben Skeggs605f9cc2016-05-17 11:13:37 +10001366 no_wait_gpu, new_reg);
Ben Skeggscef9e992013-11-22 10:52:54 +10001367 else
1368 ret = nouveau_bo_move_m2mf(bo, evict, intr,
Ben Skeggs605f9cc2016-05-17 11:13:37 +10001369 no_wait_gpu, new_reg);
Ben Skeggscef9e992013-11-22 10:52:54 +10001370 if (!ret)
1371 goto out;
Ben Skeggsb8a6a802010-08-27 11:55:43 +10001372 }
1373
Francisco Jereza0af9ad2009-12-11 16:51:09 +01001374 /* Fallback to software copy. */
Christian König8aa6d4f2016-04-06 11:12:04 +02001375 ret = ttm_bo_wait(bo, intr, no_wait_gpu);
Ben Skeggscef9e992013-11-22 10:52:54 +10001376 if (ret == 0)
Ben Skeggs605f9cc2016-05-17 11:13:37 +10001377 ret = ttm_bo_move_memcpy(bo, intr, no_wait_gpu, new_reg);
Francisco Jereza0af9ad2009-12-11 16:51:09 +01001378
1379out:
Ben Skeggs1167c6b2016-05-18 13:57:42 +10001380 if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
Ben Skeggsa4154bb2011-02-10 10:35:16 +10001381 if (ret)
1382 nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
1383 else
1384 nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
1385 }
Francisco Jereza0af9ad2009-12-11 16:51:09 +01001386
1387 return ret;
Ben Skeggs6ee73862009-12-11 19:24:15 +10001388}
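
/*
 * The copy-path selection above reads: when either side is system memory
 * a bounce through GART is needed (flipd for VRAM to system, flips for
 * the reverse), otherwise the engine can move the data directly.  The
 * same logic as a standalone hypothetical helper:
 */
#if 0
static int
nouveau_bo_move_hw(struct ttm_buffer_object *bo, bool evict, bool intr,
		   bool no_wait_gpu, struct ttm_mem_reg *old_reg,
		   struct ttm_mem_reg *new_reg)
{
	if (new_reg->mem_type == TTM_PL_SYSTEM)		/* VRAM -> SYSTEM */
		return nouveau_bo_move_flipd(bo, evict, intr,
					     no_wait_gpu, new_reg);
	if (old_reg->mem_type == TTM_PL_SYSTEM)		/* SYSTEM -> VRAM */
		return nouveau_bo_move_flips(bo, evict, intr,
					     no_wait_gpu, new_reg);
	return nouveau_bo_move_m2mf(bo, evict, intr, no_wait_gpu, new_reg);
}
#endif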
1389
1390static int
1391nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
1392{
David Herrmannacb46522013-08-25 18:28:59 +02001393 struct nouveau_bo *nvbo = nouveau_bo(bo);
1394
David Herrmannd9a1f0b2016-09-01 14:48:33 +02001395 return drm_vma_node_verify_access(&nvbo->gem.vma_node,
1396 filp->private_data);
Ben Skeggs6ee73862009-12-11 19:24:15 +10001397}
1398
Jerome Glissef32f02f2010-04-09 14:39:25 +02001399static int
Ben Skeggs605f9cc2016-05-17 11:13:37 +10001400nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
Jerome Glissef32f02f2010-04-09 14:39:25 +02001401{
Ben Skeggs605f9cc2016-05-17 11:13:37 +10001402 struct ttm_mem_type_manager *man = &bdev->man[reg->mem_type];
Ben Skeggsebb945a2012-07-20 08:17:34 +10001403 struct nouveau_drm *drm = nouveau_bdev(bdev);
Ben Skeggs1167c6b2016-05-18 13:57:42 +10001404 struct nvkm_device *device = nvxx_device(&drm->client.device);
Ben Skeggs9ce523c2017-11-01 03:56:19 +10001405 struct nouveau_mem *mem = nouveau_mem(reg);
Jerome Glissef32f02f2010-04-09 14:39:25 +02001406
Ben Skeggs605f9cc2016-05-17 11:13:37 +10001407 reg->bus.addr = NULL;
1408 reg->bus.offset = 0;
1409 reg->bus.size = reg->num_pages << PAGE_SHIFT;
1410 reg->bus.base = 0;
1411 reg->bus.is_iomem = false;
Jerome Glissef32f02f2010-04-09 14:39:25 +02001412 if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
1413 return -EINVAL;
Ben Skeggs605f9cc2016-05-17 11:13:37 +10001414 switch (reg->mem_type) {
Jerome Glissef32f02f2010-04-09 14:39:25 +02001415 case TTM_PL_SYSTEM:
1416 /* System memory */
1417 return 0;
1418 case TTM_PL_TT:
Daniel Vettera7fb8a22015-09-09 16:45:52 +02001419#if IS_ENABLED(CONFIG_AGP)
Ben Skeggs340b0e72015-08-20 14:54:23 +10001420 if (drm->agp.bridge) {
Ben Skeggs605f9cc2016-05-17 11:13:37 +10001421 reg->bus.offset = reg->start << PAGE_SHIFT;
1422 reg->bus.base = drm->agp.base;
1423 reg->bus.is_iomem = !drm->agp.cma;
Jerome Glissef32f02f2010-04-09 14:39:25 +02001424 }
1425#endif
Ben Skeggsd7722132017-11-01 03:56:20 +10001426 if (drm->client.mem->oclass < NVIF_CLASS_MEM_NV50 || !mem->kind)
Maarten Lankhorsta5540902013-11-12 13:34:09 +01001427 /* untiled */
1428 break;
1429 /* fallthrough, tiled memory */
Jerome Glissef32f02f2010-04-09 14:39:25 +02001430 case TTM_PL_VRAM:
Ben Skeggs605f9cc2016-05-17 11:13:37 +10001431 reg->bus.offset = reg->start << PAGE_SHIFT;
1432 reg->bus.base = device->func->resource_addr(device, 1);
1433 reg->bus.is_iomem = true;
Ben Skeggsd7722132017-11-01 03:56:20 +10001434 if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) {
1435 union {
1436 struct nv50_mem_map_v0 nv50;
1437 struct gf100_mem_map_v0 gf100;
1438 } args;
1439 u64 handle, length;
1440 u32 argc = 0;
1441 int ret;
Ben Skeggs3863c9b2012-07-14 19:09:17 +10001442
Ben Skeggsd7722132017-11-01 03:56:20 +10001443 switch (mem->mem.object.oclass) {
1444			case NVIF_CLASS_MEM_NV50:
1445				args.nv50.version = 0;
1446				args.nv50.ro = 0;
1447				args.nv50.kind = mem->kind;
1448				args.nv50.comp = mem->comp;
				argc = sizeof(args.nv50); /* size of args sent */
1449				break;
1450			case NVIF_CLASS_MEM_GF100:
1451				args.gf100.version = 0;
1452				args.gf100.ro = 0;
1453				args.gf100.kind = mem->kind;
				argc = sizeof(args.gf100); /* size of args sent */
1454				break;
1455 default:
1456 WARN_ON(1);
1457 break;
1458 }
Ben Skeggs3863c9b2012-07-14 19:09:17 +10001459
Ben Skeggsd7722132017-11-01 03:56:20 +10001460 ret = nvif_object_map_handle(&mem->mem.object,
1461						     &args, argc,
1462 &handle, &length);
1463 if (ret != 1)
1464 return ret ? ret : -EINVAL;
1465
1466 reg->bus.base = 0;
1467 reg->bus.offset = handle;
Ben Skeggs3863c9b2012-07-14 19:09:17 +10001468 }
Jerome Glissef32f02f2010-04-09 14:39:25 +02001469 break;
1470 default:
1471 return -EINVAL;
1472 }
1473 return 0;
1474}
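
/*
 * io_mem_reserve() only describes where the memory lives on the bus;
 * turning that into a CPU pointer is the caller's job.  Roughly how TTM
 * consumes the fields filled in above (illustration only, the ioremap
 * flavour depends on the caching placement):
 */
#if 0
	void __iomem *map = ioremap_wc(reg->bus.base + reg->bus.offset,
				       reg->bus.size);
	/* ... access ... */
	iounmap(map);
#endif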
1475
1476static void
Ben Skeggs605f9cc2016-05-17 11:13:37 +10001477nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
Jerome Glissef32f02f2010-04-09 14:39:25 +02001478{
Ben Skeggsd7722132017-11-01 03:56:20 +10001479 struct nouveau_drm *drm = nouveau_bdev(bdev);
Ben Skeggs9ce523c2017-11-01 03:56:19 +10001480 struct nouveau_mem *mem = nouveau_mem(reg);
Ben Skeggsf869ef82010-11-15 11:53:16 +10001481
Ben Skeggsd7722132017-11-01 03:56:20 +10001482 if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) {
1483 switch (reg->mem_type) {
1484 case TTM_PL_TT:
1485 if (mem->kind)
1486 nvif_object_unmap_handle(&mem->mem.object);
1487 break;
1488 case TTM_PL_VRAM:
1489 nvif_object_unmap_handle(&mem->mem.object);
1490 break;
1491 default:
1492 break;
1493 }
1494 }
Jerome Glissef32f02f2010-04-09 14:39:25 +02001495}
1496
1497static int
1498nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
1499{
Ben Skeggsebb945a2012-07-20 08:17:34 +10001500 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
Ben Skeggse1429b42010-09-10 11:12:25 +10001501 struct nouveau_bo *nvbo = nouveau_bo(bo);
Ben Skeggs1167c6b2016-05-18 13:57:42 +10001502 struct nvkm_device *device = nvxx_device(&drm->client.device);
Ben Skeggs7e8820f2015-08-20 14:54:23 +10001503 u32 mappable = device->func->resource_size(device, 1) >> PAGE_SHIFT;
Christian Königf1217ed2014-08-27 13:16:04 +02001504 int i, ret;
Ben Skeggse1429b42010-09-10 11:12:25 +10001505
1506 /* as long as the bo isn't in vram, and isn't tiled, we've got
1507 * nothing to do here.
1508 */
1509 if (bo->mem.mem_type != TTM_PL_VRAM) {
Ben Skeggs1167c6b2016-05-18 13:57:42 +10001510 if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA ||
Ben Skeggs7760a2e2017-11-01 03:56:19 +10001511 !nvbo->kind)
Ben Skeggse1429b42010-09-10 11:12:25 +10001512 return 0;
Maarten Lankhorsta5540902013-11-12 13:34:09 +01001513
1514 if (bo->mem.mem_type == TTM_PL_SYSTEM) {
1515 nouveau_bo_placement_set(nvbo, TTM_PL_TT, 0);
1516
1517 ret = nouveau_bo_validate(nvbo, false, false);
1518 if (ret)
1519 return ret;
1520 }
1521 return 0;
Ben Skeggse1429b42010-09-10 11:12:25 +10001522 }
1523
1524 /* make sure bo is in mappable vram */
Ben Skeggs1167c6b2016-05-18 13:57:42 +10001525 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA ||
Maarten Lankhorsta5540902013-11-12 13:34:09 +01001526 bo->mem.start + bo->mem.num_pages < mappable)
Ben Skeggse1429b42010-09-10 11:12:25 +10001527 return 0;
1528
Christian Königf1217ed2014-08-27 13:16:04 +02001529 for (i = 0; i < nvbo->placement.num_placement; ++i) {
1530 nvbo->placements[i].fpfn = 0;
1531 nvbo->placements[i].lpfn = mappable;
1532 }
Ben Skeggse1429b42010-09-10 11:12:25 +10001533
Christian Königf1217ed2014-08-27 13:16:04 +02001534 for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
1535 nvbo->busy_placements[i].fpfn = 0;
1536 nvbo->busy_placements[i].lpfn = mappable;
1537 }
1538
Dave Airliec2848152012-05-18 15:31:12 +01001539 nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
Maarten Lankhorst97a875c2012-11-28 11:25:44 +00001540 return nouveau_bo_validate(nvbo, false, false);
Jerome Glissef32f02f2010-04-09 14:39:25 +02001541}
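
/*
 * "mappable" above is the BAR1 aperture size in pages: with a 256 MiB
 * BAR and 4 KiB pages, (256 << 20) >> PAGE_SHIFT = 65536 pages.  Clamping
 * every placement's lpfn to that value forces the revalidation to place
 * the bo where the CPU fault can actually reach it.
 */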
1542
Konrad Rzeszutek Wilk3230cfc2011-10-17 17:14:26 -04001543static int
1544nouveau_ttm_tt_populate(struct ttm_tt *ttm)
1545{
Jerome Glisse8e7e7052011-11-09 17:15:26 -05001546 struct ttm_dma_tt *ttm_dma = (void *)ttm;
Ben Skeggsebb945a2012-07-20 08:17:34 +10001547 struct nouveau_drm *drm;
Ben Skeggs359088d2017-11-01 03:56:19 +10001548 struct device *dev;
Konrad Rzeszutek Wilk3230cfc2011-10-17 17:14:26 -04001549 unsigned i;
1550 int r;
Dave Airlie22b33e82012-04-02 11:53:06 +01001551 bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
Konrad Rzeszutek Wilk3230cfc2011-10-17 17:14:26 -04001552
1553 if (ttm->state != tt_unpopulated)
1554 return 0;
1555
Dave Airlie22b33e82012-04-02 11:53:06 +01001556 if (slave && ttm->sg) {
1557 /* make userspace faulting work */
1558 drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
1559 ttm_dma->dma_address, ttm->num_pages);
1560 ttm->state = tt_unbound;
1561 return 0;
1562 }
1563
Ben Skeggsebb945a2012-07-20 08:17:34 +10001564 drm = nouveau_bdev(ttm->bdev);
Ben Skeggs359088d2017-11-01 03:56:19 +10001565 dev = drm->dev->dev;
Konrad Rzeszutek Wilk3230cfc2011-10-17 17:14:26 -04001566
Daniel Vettera7fb8a22015-09-09 16:45:52 +02001567#if IS_ENABLED(CONFIG_AGP)
Ben Skeggs340b0e72015-08-20 14:54:23 +10001568 if (drm->agp.bridge) {
Jerome Glissedea7e0a2012-01-03 17:37:37 -05001569 return ttm_agp_tt_populate(ttm);
1570 }
1571#endif
1572
Alexandre Courbot9bcd38d2016-03-02 19:12:27 +09001573#if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
Konrad Rzeszutek Wilk3230cfc2011-10-17 17:14:26 -04001574 if (swiotlb_nr_tbl()) {
Ben Skeggs359088d2017-11-01 03:56:19 +10001575 return ttm_dma_populate((void *)ttm, dev);
Konrad Rzeszutek Wilk3230cfc2011-10-17 17:14:26 -04001576 }
1577#endif
1578
1579 r = ttm_pool_populate(ttm);
1580 if (r) {
1581 return r;
1582 }
1583
1584 for (i = 0; i < ttm->num_pages; i++) {
Alexandre Courbotfd1496a2014-07-31 18:09:42 +09001585 dma_addr_t addr;
1586
Ben Skeggs359088d2017-11-01 03:56:19 +10001587 addr = dma_map_page(dev, ttm->pages[i], 0, PAGE_SIZE,
Alexandre Courbotfd1496a2014-07-31 18:09:42 +09001588 DMA_BIDIRECTIONAL);
1589
Ben Skeggs359088d2017-11-01 03:56:19 +10001590 if (dma_mapping_error(dev, addr)) {
Rasmus Villemoes4fbbed42016-02-15 19:41:46 +01001591 while (i--) {
Ben Skeggs359088d2017-11-01 03:56:19 +10001592 dma_unmap_page(dev, ttm_dma->dma_address[i],
Alexandre Courbotfd1496a2014-07-31 18:09:42 +09001593 PAGE_SIZE, DMA_BIDIRECTIONAL);
Jerome Glisse8e7e7052011-11-09 17:15:26 -05001594 ttm_dma->dma_address[i] = 0;
Konrad Rzeszutek Wilk3230cfc2011-10-17 17:14:26 -04001595 }
1596 ttm_pool_unpopulate(ttm);
1597 return -EFAULT;
1598 }
Alexandre Courbotfd1496a2014-07-31 18:09:42 +09001599
1600 ttm_dma->dma_address[i] = addr;
Konrad Rzeszutek Wilk3230cfc2011-10-17 17:14:26 -04001601 }
1602 return 0;
1603}
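
/*
 * The populate path above is the usual map-or-unwind DMA pattern: map
 * each page, and on the first failure unmap everything mapped so far.
 * The bare pattern, as a self-contained hypothetical helper:
 */
#if 0
static int
map_all_or_unwind(struct device *dev, struct page **pages,
		  dma_addr_t *addr, unsigned n)
{
	unsigned i;

	for (i = 0; i < n; i++) {
		addr[i] = dma_map_page(dev, pages[i], 0, PAGE_SIZE,
				       DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, addr[i])) {
			while (i--)	/* unwind pages already mapped */
				dma_unmap_page(dev, addr[i], PAGE_SIZE,
					       DMA_BIDIRECTIONAL);
			return -EFAULT;
		}
	}
	return 0;
}
#endif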
1604
1605static void
1606nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
1607{
Jerome Glisse8e7e7052011-11-09 17:15:26 -05001608 struct ttm_dma_tt *ttm_dma = (void *)ttm;
Ben Skeggsebb945a2012-07-20 08:17:34 +10001609 struct nouveau_drm *drm;
Ben Skeggs359088d2017-11-01 03:56:19 +10001610 struct device *dev;
Konrad Rzeszutek Wilk3230cfc2011-10-17 17:14:26 -04001611 unsigned i;
Dave Airlie22b33e82012-04-02 11:53:06 +01001612 bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
1613
1614 if (slave)
1615 return;
Konrad Rzeszutek Wilk3230cfc2011-10-17 17:14:26 -04001616
Ben Skeggsebb945a2012-07-20 08:17:34 +10001617 drm = nouveau_bdev(ttm->bdev);
Ben Skeggs359088d2017-11-01 03:56:19 +10001618 dev = drm->dev->dev;
Konrad Rzeszutek Wilk3230cfc2011-10-17 17:14:26 -04001619
Daniel Vettera7fb8a22015-09-09 16:45:52 +02001620#if IS_ENABLED(CONFIG_AGP)
Ben Skeggs340b0e72015-08-20 14:54:23 +10001621 if (drm->agp.bridge) {
Jerome Glissedea7e0a2012-01-03 17:37:37 -05001622 ttm_agp_tt_unpopulate(ttm);
1623 return;
1624 }
1625#endif
1626
Alexandre Courbot9bcd38d2016-03-02 19:12:27 +09001627#if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
Konrad Rzeszutek Wilk3230cfc2011-10-17 17:14:26 -04001628 if (swiotlb_nr_tbl()) {
Ben Skeggs359088d2017-11-01 03:56:19 +10001629 ttm_dma_unpopulate((void *)ttm, dev);
Konrad Rzeszutek Wilk3230cfc2011-10-17 17:14:26 -04001630 return;
1631 }
1632#endif
1633
1634 for (i = 0; i < ttm->num_pages; i++) {
Jerome Glisse8e7e7052011-11-09 17:15:26 -05001635 if (ttm_dma->dma_address[i]) {
Ben Skeggs359088d2017-11-01 03:56:19 +10001636 dma_unmap_page(dev, ttm_dma->dma_address[i], PAGE_SIZE,
Alexandre Courbotfd1496a2014-07-31 18:09:42 +09001637 DMA_BIDIRECTIONAL);
Konrad Rzeszutek Wilk3230cfc2011-10-17 17:14:26 -04001638 }
1639 }
1640
1641 ttm_pool_unpopulate(ttm);
1642}
1643
Maarten Lankhorstdd7cfd62014-01-21 13:07:31 +01001644void
Maarten Lankhorst809e9442014-04-09 16:19:30 +02001645nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence, bool exclusive)
Maarten Lankhorstdd7cfd62014-01-21 13:07:31 +01001646{
Maarten Lankhorst29ba89b2014-01-09 11:03:11 +01001647 struct reservation_object *resv = nvbo->bo.resv;
Maarten Lankhorstdd7cfd62014-01-21 13:07:31 +01001648
Maarten Lankhorst809e9442014-04-09 16:19:30 +02001649 if (exclusive)
1650 reservation_object_add_excl_fence(resv, &fence->base);
1651 else if (fence)
1652 reservation_object_add_shared_fence(resv, &fence->base);
Maarten Lankhorstdd7cfd62014-01-21 13:07:31 +01001653}
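
/*
 * Callers pass exclusive=true for GPU writes (everyone must wait on the
 * fence) and false for reads (only writers must wait).  Typical usage
 * from the pushbuf validation path looks roughly like the line below;
 * the write_domains test is written from memory, treat it as a sketch:
 */
#if 0
	nouveau_bo_fence(nvbo, fence, !!b->write_domains);
#endif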
1654
Ben Skeggs6ee73862009-12-11 19:24:15 +10001655struct ttm_bo_driver nouveau_bo_driver = {
Jerome Glisse649bf3c2011-11-01 20:46:13 -04001656 .ttm_tt_create = &nouveau_ttm_tt_create,
Konrad Rzeszutek Wilk3230cfc2011-10-17 17:14:26 -04001657 .ttm_tt_populate = &nouveau_ttm_tt_populate,
1658 .ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
Ben Skeggs6ee73862009-12-11 19:24:15 +10001659 .invalidate_caches = nouveau_bo_invalidate_caches,
1660 .init_mem_type = nouveau_bo_init_mem_type,
Christian Königa2ab19fe2016-08-30 17:26:04 +02001661 .eviction_valuable = ttm_bo_eviction_valuable,
Ben Skeggs6ee73862009-12-11 19:24:15 +10001662 .evict_flags = nouveau_bo_evict_flags,
Ben Skeggsa4154bb2011-02-10 10:35:16 +10001663 .move_notify = nouveau_bo_move_ntfy,
Ben Skeggs6ee73862009-12-11 19:24:15 +10001664 .move = nouveau_bo_move,
1665 .verify_access = nouveau_bo_verify_access,
Jerome Glissef32f02f2010-04-09 14:39:25 +02001666 .fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
1667 .io_mem_reserve = &nouveau_ttm_io_mem_reserve,
1668 .io_mem_free = &nouveau_ttm_io_mem_free,
Christian Königea642c32017-03-28 16:54:50 +02001669 .io_mem_pfn = ttm_bo_default_io_mem_pfn,
Ben Skeggs6ee73862009-12-11 19:24:15 +10001670};
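
/*
 * This vtable is what plugs everything above into TTM; nouveau_ttm.c
 * registers it once at load time.  Approximately (argument list written
 * from memory against the TTM API of this era, so treat the field and
 * macro names as assumptions, not a quote from the driver):
 */
#if 0
	ret = ttm_bo_device_init(&drm->ttm.bdev,
				 drm->ttm.bo_global_ref.ref.object,
				 &nouveau_bo_driver,
				 dev->anon_inode->i_mapping,
				 DRM_FILE_PAGE_OFFSET,
				 drm->client.mmu.dmabits <= 32);
#endif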