/*
 * Copyright 2007 Dave Airlied
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *          Ben Skeggs   <darktama@iinet.net.au>
 *          Jeremy Kolb  <jkolb@brandeis.edu>
 */

#include <linux/dma-mapping.h>
#include <linux/swiotlb.h>

#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"

#include "nouveau_bo.h"
#include "nouveau_ttm.h"
#include "nouveau_gem.h"

/*
 * NV10-NV40 tiling helpers
 */

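/*
 * (Re)program tile region @reg with the given address/size/pitch/flags.
 * Any fence still guarding the region's previous user is dropped first,
 * existing hardware tiling state is torn down, and the new configuration
 * (if pitch is non-zero) is written to the framebuffer unit.
 */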
static void
nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
                           u32 addr, u32 size, u32 pitch, u32 flags)
{
        struct nouveau_drm *drm = nouveau_drm(dev);
        int i = reg - drm->tile.reg;
        struct nvkm_device *device = nvxx_device(&drm->client.device);
        struct nvkm_fb *fb = device->fb;
        struct nvkm_fb_tile *tile = &fb->tile.region[i];

        nouveau_fence_unref(&reg->fence);

        if (tile->pitch)
                nvkm_fb_tile_fini(fb, i, tile);

        if (pitch)
                nvkm_fb_tile_init(fb, i, addr, size, pitch, flags, tile);

        nvkm_fb_tile_prog(fb, i, tile);
}

static struct nouveau_drm_tile *
nv10_bo_get_tile_region(struct drm_device *dev, int i)
{
        struct nouveau_drm *drm = nouveau_drm(dev);
        struct nouveau_drm_tile *tile = &drm->tile.reg[i];

        spin_lock(&drm->tile.lock);

        if (!tile->used &&
            (!tile->fence || nouveau_fence_done(tile->fence)))
                tile->used = true;
        else
                tile = NULL;

        spin_unlock(&drm->tile.lock);
        return tile;
}

static void
nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
                        struct dma_fence *fence)
{
        struct nouveau_drm *drm = nouveau_drm(dev);

        if (tile) {
                spin_lock(&drm->tile.lock);
                tile->fence = (struct nouveau_fence *)dma_fence_get(fence);
                tile->used = false;
                spin_unlock(&drm->tile.lock);
        }
}

static struct nouveau_drm_tile *
nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
                   u32 size, u32 pitch, u32 flags)
{
        struct nouveau_drm *drm = nouveau_drm(dev);
        struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
        struct nouveau_drm_tile *tile, *found = NULL;
        int i;

        for (i = 0; i < fb->tile.regions; i++) {
                tile = nv10_bo_get_tile_region(dev, i);

                if (pitch && !found) {
                        found = tile;
                        continue;

                } else if (tile && fb->tile.region[i].pitch) {
                        /* Kill an unused tile region. */
                        nv10_bo_update_tile_region(dev, tile, 0, 0, 0, 0);
                }

                nv10_bo_put_tile_region(dev, tile, NULL);
        }

        if (found)
                nv10_bo_update_tile_region(dev, found, addr, size,
                                           pitch, flags);
        return found;
}

static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
        struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
        struct drm_device *dev = drm->dev;
        struct nouveau_bo *nvbo = nouveau_bo(bo);

        if (unlikely(nvbo->gem.filp))
                DRM_ERROR("bo %p still attached to GEM object\n", bo);
        WARN_ON(nvbo->pin_refcnt > 0);
        nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
        kfree(nvbo);
}

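/*
 * Round up a buffer's size and alignment to the constraints of the target
 * chipset: pre-Tesla parts need tile-mode-dependent padding, while newer
 * parts simply pad to the page size chosen for the buffer.
 */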
static void
nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
                       int *align, int *size)
{
        struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
        struct nvif_device *device = &drm->client.device;

        if (device->info.family < NV_DEVICE_INFO_V0_TESLA) {
                if (nvbo->tile_mode) {
                        if (device->info.chipset >= 0x40) {
                                *align = 65536;
                                *size = roundup(*size, 64 * nvbo->tile_mode);

                        } else if (device->info.chipset >= 0x30) {
                                *align = 32768;
                                *size = roundup(*size, 64 * nvbo->tile_mode);

                        } else if (device->info.chipset >= 0x20) {
                                *align = 16384;
                                *size = roundup(*size, 64 * nvbo->tile_mode);

                        } else if (device->info.chipset >= 0x10) {
                                *align = 16384;
                                *size = roundup(*size, 32 * nvbo->tile_mode);
                        }
                }
        } else {
                *size = roundup(*size, (1 << nvbo->page_shift));
                *align = max((1 << nvbo->page_shift), *align);
        }

        *size = roundup(*size, PAGE_SIZE);
}

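/*
 * Allocate and initialise a nouveau buffer object: validate the requested
 * size, pick a page shift (large pages for sufficiently big non-GART
 * buffers when a client VM exists), fix up size/alignment, and hand the
 * object to TTM.  On failure ttm_bo_init() calls nouveau_bo_del_ttm()
 * itself, so the error path must not free nvbo again.
 */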
int
nouveau_bo_new(struct nouveau_cli *cli, int size, int align,
               uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
               struct sg_table *sg, struct reservation_object *robj,
               struct nouveau_bo **pnvbo)
{
        struct nouveau_drm *drm = nouveau_drm(cli->dev);
        struct nouveau_bo *nvbo;
        size_t acc_size;
        int ret;
        int type = ttm_bo_type_device;
        int lpg_shift = 12;
        int max_size;

        if (drm->client.vm)
                lpg_shift = drm->client.vm->mmu->lpg_shift;
        max_size = INT_MAX & ~((1 << lpg_shift) - 1);

        if (size <= 0 || size > max_size) {
                NV_WARN(drm, "skipped size %x\n", (u32)size);
                return -EINVAL;
        }

        if (sg)
                type = ttm_bo_type_sg;

        nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
        if (!nvbo)
                return -ENOMEM;
        INIT_LIST_HEAD(&nvbo->head);
        INIT_LIST_HEAD(&nvbo->entry);
        INIT_LIST_HEAD(&nvbo->vma_list);
        nvbo->tile_mode = tile_mode;
        nvbo->tile_flags = tile_flags;
        nvbo->bo.bdev = &drm->ttm.bdev;
        nvbo->cli = cli;

        if (!nvxx_device(&drm->client.device)->func->cpu_coherent)
                nvbo->force_coherent = flags & TTM_PL_FLAG_UNCACHED;

        nvbo->page_shift = 12;
        if (drm->client.vm) {
                if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
                        nvbo->page_shift = drm->client.vm->mmu->lpg_shift;
        }

        nouveau_bo_fixup_align(nvbo, flags, &align, &size);
        nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
        nouveau_bo_placement_set(nvbo, flags, 0);

        acc_size = ttm_bo_dma_acc_size(&drm->ttm.bdev, size,
                                       sizeof(struct nouveau_bo));

        ret = ttm_bo_init(&drm->ttm.bdev, &nvbo->bo, size,
                          type, &nvbo->placement,
                          align >> PAGE_SHIFT, false, NULL, acc_size, sg,
                          robj, nouveau_bo_del_ttm);
        if (ret) {
                /* ttm will call nouveau_bo_del_ttm if it fails.. */
                return ret;
        }

        *pnvbo = nvbo;
        return 0;
}

static void
set_placement_list(struct ttm_place *pl, unsigned *n, uint32_t type, uint32_t flags)
{
        *n = 0;

        if (type & TTM_PL_FLAG_VRAM)
                pl[(*n)++].flags = TTM_PL_FLAG_VRAM | flags;
        if (type & TTM_PL_FLAG_TT)
                pl[(*n)++].flags = TTM_PL_FLAG_TT | flags;
        if (type & TTM_PL_FLAG_SYSTEM)
                pl[(*n)++].flags = TTM_PL_FLAG_SYSTEM | flags;
}

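/*
 * On NV10-class (Celsius) boards, restrict small tiled buffers to one half
 * of VRAM so that colour and depth (ZETA) buffers end up on independent
 * memory controller units; see the comment in the function body for the
 * measured benefit.
 */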
static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
{
        struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
        u32 vram_pages = drm->client.device.info.ram_size >> PAGE_SHIFT;
        unsigned i, fpfn, lpfn;

        if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CELSIUS &&
            nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
            nvbo->bo.mem.num_pages < vram_pages / 4) {
                /*
                 * Make sure that the color and depth buffers are handled
                 * by independent memory controller units. Up to a 9x
                 * speed up when alpha-blending and depth-test are enabled
                 * at the same time.
                 */
                if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
                        fpfn = vram_pages / 2;
                        lpfn = ~0;
                } else {
                        fpfn = 0;
                        lpfn = vram_pages / 2;
                }
                for (i = 0; i < nvbo->placement.num_placement; ++i) {
                        nvbo->placements[i].fpfn = fpfn;
                        nvbo->placements[i].lpfn = lpfn;
                }
                for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
                        nvbo->busy_placements[i].fpfn = fpfn;
                        nvbo->busy_placements[i].lpfn = lpfn;
                }
        }
}

void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
{
        struct ttm_placement *pl = &nvbo->placement;
        uint32_t flags = (nvbo->force_coherent ? TTM_PL_FLAG_UNCACHED :
                                                 TTM_PL_MASK_CACHING) |
                         (nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);

        pl->placement = nvbo->placements;
        set_placement_list(nvbo->placements, &pl->num_placement,
                           type, flags);

        pl->busy_placement = nvbo->busy_placements;
        set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
                           type | busy, flags);

        set_placement_range(nvbo, type);
}

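/*
 * Pin a buffer into @memtype memory.  When the caller requires contiguous
 * backing (@contig) for a buffer created NONCONTIG, the buffer may first
 * be evicted to GART so that revalidation can produce a contiguous VRAM
 * allocation; pin_refcnt is dropped around the final validate to avoid
 * tripping the pinned-buffer assertion in the move path.
 */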
int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype, bool contig)
{
        struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
        struct ttm_buffer_object *bo = &nvbo->bo;
        bool force = false, evict = false;
        int ret;

        ret = ttm_bo_reserve(bo, false, false, NULL);
        if (ret)
                return ret;

        if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
            memtype == TTM_PL_FLAG_VRAM && contig) {
                if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG) {
                        if (bo->mem.mem_type == TTM_PL_VRAM) {
                                struct nvkm_mem *mem = bo->mem.mm_node;
                                if (!nvkm_mm_contiguous(mem->mem))
                                        evict = true;
                        }
                        nvbo->tile_flags &= ~NOUVEAU_GEM_TILE_NONCONTIG;
                        force = true;
                }
        }

        if (nvbo->pin_refcnt) {
                if (!(memtype & (1 << bo->mem.mem_type)) || evict) {
                        NV_ERROR(drm, "bo %p pinned elsewhere: "
                                      "0x%08x vs 0x%08x\n", bo,
                                 1 << bo->mem.mem_type, memtype);
                        ret = -EBUSY;
                }
                nvbo->pin_refcnt++;
                goto out;
        }

        if (evict) {
                nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT, 0);
                ret = nouveau_bo_validate(nvbo, false, false);
                if (ret)
                        goto out;
        }

        nvbo->pin_refcnt++;
        nouveau_bo_placement_set(nvbo, memtype, 0);

        /* drop pin_refcnt temporarily, so we don't trip the assertion
         * in nouveau_bo_move() that makes sure we're not trying to
         * move a pinned buffer
         */
        nvbo->pin_refcnt--;
        ret = nouveau_bo_validate(nvbo, false, false);
        if (ret)
                goto out;
        nvbo->pin_refcnt++;

        switch (bo->mem.mem_type) {
        case TTM_PL_VRAM:
                drm->gem.vram_available -= bo->mem.size;
                break;
        case TTM_PL_TT:
                drm->gem.gart_available -= bo->mem.size;
                break;
        default:
                break;
        }

out:
        if (force && ret)
                nvbo->tile_flags |= NOUVEAU_GEM_TILE_NONCONTIG;
        ttm_bo_unreserve(bo);
        return ret;
}

int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
        struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
        struct ttm_buffer_object *bo = &nvbo->bo;
        int ret, ref;

        ret = ttm_bo_reserve(bo, false, false, NULL);
        if (ret)
                return ret;

        ref = --nvbo->pin_refcnt;
        WARN_ON_ONCE(ref < 0);
        if (ref)
                goto out;

        nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);

        ret = nouveau_bo_validate(nvbo, false, false);
        if (ret == 0) {
                switch (bo->mem.mem_type) {
                case TTM_PL_VRAM:
                        drm->gem.vram_available += bo->mem.size;
                        break;
                case TTM_PL_TT:
                        drm->gem.gart_available += bo->mem.size;
                        break;
                default:
                        break;
                }
        }

out:
        ttm_bo_unreserve(bo);
        return ret;
}

int
nouveau_bo_map(struct nouveau_bo *nvbo)
{
        int ret;

        ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
        if (ret)
                return ret;

        ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);

        ttm_bo_unreserve(&nvbo->bo);
        return ret;
}

void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
        if (!nvbo)
                return;

        ttm_bo_kunmap(&nvbo->kmap);
}

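/*
 * Streaming DMA helpers for non-coherent platforms: CPU caches are flushed
 * page by page before the device reads (DMA_TO_DEVICE) and invalidated
 * before the CPU reads (DMA_FROM_DEVICE).  Coherently-allocated
 * (force_coherent) objects and non-DMA ttms skip the loop entirely.
 */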
void
nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
{
        struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
        struct nvkm_device *device = nvxx_device(&drm->client.device);
        struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
        int i;

        if (!ttm_dma)
                return;

        /* Don't waste time looping if the object is coherent */
        if (nvbo->force_coherent)
                return;

        for (i = 0; i < ttm_dma->ttm.num_pages; i++)
                dma_sync_single_for_device(device->dev, ttm_dma->dma_address[i],
                                           PAGE_SIZE, DMA_TO_DEVICE);
}

void
nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
{
        struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
        struct nvkm_device *device = nvxx_device(&drm->client.device);
        struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
        int i;

        if (!ttm_dma)
                return;

        /* Don't waste time looping if the object is coherent */
        if (nvbo->force_coherent)
                return;

        for (i = 0; i < ttm_dma->ttm.num_pages; i++)
                dma_sync_single_for_cpu(device->dev, ttm_dma->dma_address[i],
                                        PAGE_SIZE, DMA_FROM_DEVICE);
}

int
nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
                    bool no_wait_gpu)
{
        int ret;

        ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
                              interruptible, no_wait_gpu);
        if (ret)
                return ret;

        nouveau_bo_sync_for_device(nvbo);

        return 0;
}

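/*
 * Accessors for a kmap'd buffer.  ttm_kmap_obj_virtual() reports whether
 * the mapping is I/O memory (e.g. a VRAM BAR); if so, the native-endian
 * MMIO accessors must be used rather than plain loads and stores.
 */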
void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
        bool is_iomem;
        u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

        mem += index;

        if (is_iomem)
                iowrite16_native(val, (void __force __iomem *)mem);
        else
                *mem = val;
}

u32
nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
{
        bool is_iomem;
        u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

        mem += index;

        if (is_iomem)
                return ioread32_native((void __force __iomem *)mem);
        else
                return *mem;
}

void
nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
{
        bool is_iomem;
        u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

        mem += index;

        if (is_iomem)
                iowrite32_native(val, (void __force __iomem *)mem);
        else
                *mem = val;
}

static struct ttm_tt *
nouveau_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
                      uint32_t page_flags, struct page *dummy_read)
{
#if IS_ENABLED(CONFIG_AGP)
        struct nouveau_drm *drm = nouveau_bdev(bdev);

        if (drm->agp.bridge) {
                return ttm_agp_tt_create(bdev, drm->agp.bridge, size,
                                         page_flags, dummy_read);
        }
#endif

        return nouveau_sgdma_create_ttm(bdev, size, page_flags, dummy_read);
}

static int
nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
        /* We'll do this from user space. */
        return 0;
}

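/*
 * Describe each TTM memory type to the core: which manager backs it, the
 * legal caching modes, and the default.  VRAM is write-combined where the
 * BAR supports it; GART behaviour depends on whether an AGP bridge drives
 * the aperture.
 */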
static int
nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                         struct ttm_mem_type_manager *man)
{
        struct nouveau_drm *drm = nouveau_bdev(bdev);

        switch (type) {
        case TTM_PL_SYSTEM:
                man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_MASK_CACHING;
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        case TTM_PL_VRAM:
                man->flags = TTM_MEMTYPE_FLAG_FIXED |
                             TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_FLAG_UNCACHED |
                                         TTM_PL_FLAG_WC;
                man->default_caching = TTM_PL_FLAG_WC;

                if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
                        /* Some BARs do not support being ioremapped WC */
                        if (nvxx_bar(&drm->client.device)->iomap_uncached) {
                                man->available_caching = TTM_PL_FLAG_UNCACHED;
                                man->default_caching = TTM_PL_FLAG_UNCACHED;
                        }

                        man->func = &nouveau_vram_manager;
                        man->io_reserve_fastpath = false;
                        man->use_io_reserve_lru = true;
                } else {
                        man->func = &ttm_bo_manager_func;
                }
                break;
        case TTM_PL_TT:
                if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA)
                        man->func = &nouveau_gart_manager;
                else
                if (!drm->agp.bridge)
                        man->func = &nv04_gart_manager;
                else
                        man->func = &ttm_bo_manager_func;

                if (drm->agp.bridge) {
                        man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
                        man->available_caching = TTM_PL_FLAG_UNCACHED |
                                                 TTM_PL_FLAG_WC;
                        man->default_caching = TTM_PL_FLAG_WC;
                } else {
                        man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
                                     TTM_MEMTYPE_FLAG_CMA;
                        man->available_caching = TTM_PL_MASK_CACHING;
                        man->default_caching = TTM_PL_FLAG_CACHED;
                }

                break;
        default:
                return -EINVAL;
        }
        return 0;
}

static void
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
        struct nouveau_bo *nvbo = nouveau_bo(bo);

        switch (bo->mem.mem_type) {
        case TTM_PL_VRAM:
                nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
                                         TTM_PL_FLAG_SYSTEM);
                break;
        default:
                nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
                break;
        }

        *pl = nvbo->placement;
}


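/*
 * Per-generation GPU copy backends.  Each *_move_init() binds the copy
 * class to a channel once at load time, and the matching copy/exec
 * function emits the commands for a single buffer move.
 * nouveau_bo_move_init() below probes this list, newest class first.
 */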
static int
nve0_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
        int ret = RING_SPACE(chan, 2);
        if (ret == 0) {
                BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
                OUT_RING  (chan, handle & 0x0000ffff);
                FIRE_RING (chan);
        }
        return ret;
}

static int
nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
{
        struct nvkm_mem *mem = old_reg->mm_node;
        int ret = RING_SPACE(chan, 10);
        if (ret == 0) {
                BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8);
                OUT_RING  (chan, upper_32_bits(mem->vma[0].offset));
                OUT_RING  (chan, lower_32_bits(mem->vma[0].offset));
                OUT_RING  (chan, upper_32_bits(mem->vma[1].offset));
                OUT_RING  (chan, lower_32_bits(mem->vma[1].offset));
                OUT_RING  (chan, PAGE_SIZE);
                OUT_RING  (chan, PAGE_SIZE);
                OUT_RING  (chan, PAGE_SIZE);
                OUT_RING  (chan, new_reg->num_pages);
                BEGIN_IMC0(chan, NvSubCopy, 0x0300, 0x0386);
        }
        return ret;
}

static int
nvc0_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
        int ret = RING_SPACE(chan, 2);
        if (ret == 0) {
                BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
                OUT_RING  (chan, handle);
        }
        return ret;
}

static int
nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
{
        struct nvkm_mem *mem = old_reg->mm_node;
        u64 src_offset = mem->vma[0].offset;
        u64 dst_offset = mem->vma[1].offset;
        u32 page_count = new_reg->num_pages;
        int ret;

        page_count = new_reg->num_pages;
        while (page_count) {
                int line_count = (page_count > 8191) ? 8191 : page_count;

                ret = RING_SPACE(chan, 11);
                if (ret)
                        return ret;

                BEGIN_NVC0(chan, NvSubCopy, 0x030c, 8);
                OUT_RING  (chan, upper_32_bits(src_offset));
                OUT_RING  (chan, lower_32_bits(src_offset));
                OUT_RING  (chan, upper_32_bits(dst_offset));
                OUT_RING  (chan, lower_32_bits(dst_offset));
                OUT_RING  (chan, PAGE_SIZE);
                OUT_RING  (chan, PAGE_SIZE);
                OUT_RING  (chan, PAGE_SIZE);
                OUT_RING  (chan, line_count);
                BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
                OUT_RING  (chan, 0x00000110);

                page_count -= line_count;
                src_offset += (PAGE_SIZE * line_count);
                dst_offset += (PAGE_SIZE * line_count);
        }

        return 0;
}

static int
nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
{
        struct nvkm_mem *mem = old_reg->mm_node;
        u64 src_offset = mem->vma[0].offset;
        u64 dst_offset = mem->vma[1].offset;
        u32 page_count = new_reg->num_pages;
        int ret;

        page_count = new_reg->num_pages;
        while (page_count) {
                int line_count = (page_count > 2047) ? 2047 : page_count;

                ret = RING_SPACE(chan, 12);
                if (ret)
                        return ret;

                BEGIN_NVC0(chan, NvSubCopy, 0x0238, 2);
                OUT_RING  (chan, upper_32_bits(dst_offset));
                OUT_RING  (chan, lower_32_bits(dst_offset));
                BEGIN_NVC0(chan, NvSubCopy, 0x030c, 6);
                OUT_RING  (chan, upper_32_bits(src_offset));
                OUT_RING  (chan, lower_32_bits(src_offset));
                OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
                OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
                OUT_RING  (chan, PAGE_SIZE); /* line_length */
                OUT_RING  (chan, line_count);
                BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
                OUT_RING  (chan, 0x00100110);

                page_count -= line_count;
                src_offset += (PAGE_SIZE * line_count);
                dst_offset += (PAGE_SIZE * line_count);
        }

        return 0;
}

static int
nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
{
        struct nvkm_mem *mem = old_reg->mm_node;
        u64 src_offset = mem->vma[0].offset;
        u64 dst_offset = mem->vma[1].offset;
        u32 page_count = new_reg->num_pages;
        int ret;

        page_count = new_reg->num_pages;
        while (page_count) {
                int line_count = (page_count > 8191) ? 8191 : page_count;

                ret = RING_SPACE(chan, 11);
                if (ret)
                        return ret;

                BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
                OUT_RING  (chan, upper_32_bits(src_offset));
                OUT_RING  (chan, lower_32_bits(src_offset));
                OUT_RING  (chan, upper_32_bits(dst_offset));
                OUT_RING  (chan, lower_32_bits(dst_offset));
                OUT_RING  (chan, PAGE_SIZE);
                OUT_RING  (chan, PAGE_SIZE);
                OUT_RING  (chan, PAGE_SIZE);
                OUT_RING  (chan, line_count);
                BEGIN_NV04(chan, NvSubCopy, 0x0300, 1);
                OUT_RING  (chan, 0x00000110);

                page_count -= line_count;
                src_offset += (PAGE_SIZE * line_count);
                dst_offset += (PAGE_SIZE * line_count);
        }

        return 0;
}

static int
nv98_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
{
        struct nvkm_mem *mem = old_reg->mm_node;
        int ret = RING_SPACE(chan, 7);
        if (ret == 0) {
                BEGIN_NV04(chan, NvSubCopy, 0x0320, 6);
                OUT_RING  (chan, upper_32_bits(mem->vma[0].offset));
                OUT_RING  (chan, lower_32_bits(mem->vma[0].offset));
                OUT_RING  (chan, upper_32_bits(mem->vma[1].offset));
                OUT_RING  (chan, lower_32_bits(mem->vma[1].offset));
                OUT_RING  (chan, 0x00000000 /* COPY */);
                OUT_RING  (chan, new_reg->num_pages << PAGE_SHIFT);
        }
        return ret;
}

static int
nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
{
        struct nvkm_mem *mem = old_reg->mm_node;
        int ret = RING_SPACE(chan, 7);
        if (ret == 0) {
                BEGIN_NV04(chan, NvSubCopy, 0x0304, 6);
                OUT_RING  (chan, new_reg->num_pages << PAGE_SHIFT);
                OUT_RING  (chan, upper_32_bits(mem->vma[0].offset));
                OUT_RING  (chan, lower_32_bits(mem->vma[0].offset));
                OUT_RING  (chan, upper_32_bits(mem->vma[1].offset));
                OUT_RING  (chan, lower_32_bits(mem->vma[1].offset));
                OUT_RING  (chan, 0x00000000 /* MODE_COPY, QUERY_NONE */);
        }
        return ret;
}

static int
nv50_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
        int ret = RING_SPACE(chan, 6);
        if (ret == 0) {
                BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
                OUT_RING  (chan, handle);
                BEGIN_NV04(chan, NvSubCopy, 0x0180, 3);
                OUT_RING  (chan, chan->drm->ntfy.handle);
                OUT_RING  (chan, chan->vram.handle);
                OUT_RING  (chan, chan->vram.handle);
        }

        return ret;
}

static int
nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
{
        struct nvkm_mem *mem = old_reg->mm_node;
        u64 length = (new_reg->num_pages << PAGE_SHIFT);
        u64 src_offset = mem->vma[0].offset;
        u64 dst_offset = mem->vma[1].offset;
        int src_tiled = !!mem->memtype;
        int dst_tiled = !!((struct nvkm_mem *)new_reg->mm_node)->memtype;
        int ret;

        while (length) {
                u32 amount, stride, height;

                ret = RING_SPACE(chan, 18 + 6 * (src_tiled + dst_tiled));
                if (ret)
                        return ret;

                amount = min(length, (u64)(4 * 1024 * 1024));
                stride = 16 * 4;
                height = amount / stride;

                if (src_tiled) {
                        BEGIN_NV04(chan, NvSubCopy, 0x0200, 7);
                        OUT_RING  (chan, 0);
                        OUT_RING  (chan, 0);
                        OUT_RING  (chan, stride);
                        OUT_RING  (chan, height);
                        OUT_RING  (chan, 1);
                        OUT_RING  (chan, 0);
                        OUT_RING  (chan, 0);
                } else {
                        BEGIN_NV04(chan, NvSubCopy, 0x0200, 1);
                        OUT_RING  (chan, 1);
                }
                if (dst_tiled) {
                        BEGIN_NV04(chan, NvSubCopy, 0x021c, 7);
                        OUT_RING  (chan, 0);
                        OUT_RING  (chan, 0);
                        OUT_RING  (chan, stride);
                        OUT_RING  (chan, height);
                        OUT_RING  (chan, 1);
                        OUT_RING  (chan, 0);
                        OUT_RING  (chan, 0);
                } else {
                        BEGIN_NV04(chan, NvSubCopy, 0x021c, 1);
                        OUT_RING  (chan, 1);
                }

                BEGIN_NV04(chan, NvSubCopy, 0x0238, 2);
                OUT_RING  (chan, upper_32_bits(src_offset));
                OUT_RING  (chan, upper_32_bits(dst_offset));
                BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
                OUT_RING  (chan, lower_32_bits(src_offset));
                OUT_RING  (chan, lower_32_bits(dst_offset));
                OUT_RING  (chan, stride);
                OUT_RING  (chan, stride);
                OUT_RING  (chan, stride);
                OUT_RING  (chan, height);
                OUT_RING  (chan, 0x00000101);
                OUT_RING  (chan, 0x00000000);
                BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
                OUT_RING  (chan, 0);

                length -= amount;
                src_offset += amount;
                dst_offset += amount;
        }

        return 0;
}

static int
nv04_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
        int ret = RING_SPACE(chan, 4);
        if (ret == 0) {
                BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
                OUT_RING  (chan, handle);
                BEGIN_NV04(chan, NvSubCopy, 0x0180, 1);
                OUT_RING  (chan, chan->drm->ntfy.handle);
        }

        return ret;
}

static inline uint32_t
nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
                      struct nouveau_channel *chan, struct ttm_mem_reg *reg)
{
        if (reg->mem_type == TTM_PL_TT)
                return NvDmaTT;
        return chan->vram.handle;
}

static int
nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
{
        u32 src_offset = old_reg->start << PAGE_SHIFT;
        u32 dst_offset = new_reg->start << PAGE_SHIFT;
        u32 page_count = new_reg->num_pages;
        int ret;

        ret = RING_SPACE(chan, 3);
        if (ret)
                return ret;

        BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
        OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, old_reg));
        OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, new_reg));

        page_count = new_reg->num_pages;
        while (page_count) {
                int line_count = (page_count > 2047) ? 2047 : page_count;

                ret = RING_SPACE(chan, 11);
                if (ret)
                        return ret;

                BEGIN_NV04(chan, NvSubCopy,
                           NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
                OUT_RING  (chan, src_offset);
                OUT_RING  (chan, dst_offset);
                OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
                OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
                OUT_RING  (chan, PAGE_SIZE); /* line_length */
                OUT_RING  (chan, line_count);
                OUT_RING  (chan, 0x00000101);
                OUT_RING  (chan, 0x00000000);
                BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
                OUT_RING  (chan, 0);

                page_count -= line_count;
                src_offset += (PAGE_SIZE * line_count);
                dst_offset += (PAGE_SIZE * line_count);
        }

        return 0;
}

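/*
 * Map both the source and the destination into the client VM so the copy
 * engine can address them.  Both temporary VMAs are attached to the *old*
 * node (vma[0] = source, vma[1] = destination) and are cleaned up when
 * TTM destroys the old ttm_mem_reg.
 */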
static int
nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo,
                     struct ttm_mem_reg *reg)
{
        struct nvkm_mem *old_mem = bo->mem.mm_node;
        struct nvkm_mem *new_mem = reg->mm_node;
        u64 size = (u64)reg->num_pages << PAGE_SHIFT;
        int ret;

        ret = nvkm_vm_get(drm->client.vm, size, old_mem->page_shift,
                          NV_MEM_ACCESS_RW, &old_mem->vma[0]);
        if (ret)
                return ret;

        ret = nvkm_vm_get(drm->client.vm, size, new_mem->page_shift,
                          NV_MEM_ACCESS_RW, &old_mem->vma[1]);
        if (ret) {
                nvkm_vm_put(&old_mem->vma[0]);
                return ret;
        }

        nvkm_vm_map(&old_mem->vma[0], old_mem);
        nvkm_vm_map(&old_mem->vma[1], new_mem);
        return 0;
}

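/*
 * Copy the buffer contents to the new backing store with the backend
 * selected at load time, then fence the move so TTM delays freeing the
 * old storage until the GPU has finished with it.
 */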
static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
                     bool no_wait_gpu, struct ttm_mem_reg *new_reg)
{
        struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
        struct nouveau_channel *chan = drm->ttm.chan;
        struct nouveau_cli *cli = (void *)chan->user.client;
        struct nouveau_fence *fence;
        int ret;

        /* create temporary vmas for the transfer and attach them to the
         * old nvkm_mem node, these will get cleaned up after ttm has
         * destroyed the ttm_mem_reg
         */
        if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
                ret = nouveau_bo_move_prep(drm, bo, new_reg);
                if (ret)
                        return ret;
        }

        mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING);
        ret = nouveau_fence_sync(nouveau_bo(bo), chan, true, intr);
        if (ret == 0) {
                ret = drm->ttm.move(chan, bo, &bo->mem, new_reg);
                if (ret == 0) {
                        ret = nouveau_fence_new(chan, false, &fence);
                        if (ret == 0) {
                                ret = ttm_bo_move_accel_cleanup(bo,
                                                                &fence->base,
                                                                evict,
                                                                new_reg);
                                nouveau_fence_unref(&fence);
                        }
                }
        }
        mutex_unlock(&cli->mutex);
        return ret;
}

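/*
 * Probe for the best available copy backend, from the newest copy-engine
 * classes down to NV04 M2MF, falling back to CPU copies when no channel
 * or class is usable.  Note the empty sentinel entry terminates the probe
 * loop, which effectively disables the 0x88b4 CRYPT method listed after
 * it.
 */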
void
nouveau_bo_move_init(struct nouveau_drm *drm)
{
        static const struct {
                const char *name;
                int engine;
                s32 oclass;
                int (*exec)(struct nouveau_channel *,
                            struct ttm_buffer_object *,
                            struct ttm_mem_reg *, struct ttm_mem_reg *);
                int (*init)(struct nouveau_channel *, u32 handle);
        } _methods[] = {
                {  "COPY", 4, 0xc1b5, nve0_bo_move_copy, nve0_bo_move_init },
                {  "GRCE", 0, 0xc1b5, nve0_bo_move_copy, nvc0_bo_move_init },
                {  "COPY", 4, 0xc0b5, nve0_bo_move_copy, nve0_bo_move_init },
                {  "GRCE", 0, 0xc0b5, nve0_bo_move_copy, nvc0_bo_move_init },
                {  "COPY", 4, 0xb0b5, nve0_bo_move_copy, nve0_bo_move_init },
                {  "GRCE", 0, 0xb0b5, nve0_bo_move_copy, nvc0_bo_move_init },
                {  "COPY", 4, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init },
                {  "GRCE", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init },
                { "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init },
                { "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init },
                {  "COPY", 0, 0x85b5, nva3_bo_move_copy, nv50_bo_move_init },
                { "CRYPT", 0, 0x74c1, nv84_bo_move_exec, nv50_bo_move_init },
                {  "M2MF", 0, 0x9039, nvc0_bo_move_m2mf, nvc0_bo_move_init },
                {  "M2MF", 0, 0x5039, nv50_bo_move_m2mf, nv50_bo_move_init },
                {  "M2MF", 0, 0x0039, nv04_bo_move_m2mf, nv04_bo_move_init },
                {},
                { "CRYPT", 0, 0x88b4, nv98_bo_move_exec, nv50_bo_move_init },
        }, *mthd = _methods;
        const char *name = "CPU";
        int ret;

        do {
                struct nouveau_channel *chan;

                if (mthd->engine)
                        chan = drm->cechan;
                else
                        chan = drm->channel;
                if (chan == NULL)
                        continue;

                ret = nvif_object_init(&chan->user,
                                       mthd->oclass | (mthd->engine << 16),
                                       mthd->oclass, NULL, 0,
                                       &drm->ttm.copy);
                if (ret == 0) {
                        ret = mthd->init(chan, drm->ttm.copy.handle);
                        if (ret) {
                                nvif_object_fini(&drm->ttm.copy);
                                continue;
                        }

                        drm->ttm.move = mthd->exec;
                        drm->ttm.chan = chan;
                        name = mthd->name;
                        break;
                }
        } while ((++mthd)->exec);

        NV_INFO(drm, "MM: using %s for buffer copies\n", name);
}

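/*
 * Two-hop moves through a temporary GART placement, for transfers the
 * engine cannot do directly: _flipd copies out of the current placement
 * with the engine and then lets TTM finish the move, while _flips lets
 * TTM move into GART first and then copies with the engine.
 */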
Ben Skeggs6ee73862009-12-11 19:24:15 +10001126static int
1127nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
Ben Skeggs605f9cc2016-05-17 11:13:37 +10001128 bool no_wait_gpu, struct ttm_mem_reg *new_reg)
Ben Skeggs6ee73862009-12-11 19:24:15 +10001129{
Christian Königf1217ed2014-08-27 13:16:04 +02001130 struct ttm_place placement_memtype = {
1131 .fpfn = 0,
1132 .lpfn = 0,
1133 .flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING
1134 };
Ben Skeggs6ee73862009-12-11 19:24:15 +10001135 struct ttm_placement placement;
Ben Skeggs605f9cc2016-05-17 11:13:37 +10001136 struct ttm_mem_reg tmp_reg;
Ben Skeggs6ee73862009-12-11 19:24:15 +10001137 int ret;
1138
Ben Skeggs6ee73862009-12-11 19:24:15 +10001139 placement.num_placement = placement.num_busy_placement = 1;
Francisco Jerez77e2b5e2009-12-16 19:05:00 +01001140 placement.placement = placement.busy_placement = &placement_memtype;
Ben Skeggs6ee73862009-12-11 19:24:15 +10001141
Ben Skeggs605f9cc2016-05-17 11:13:37 +10001142 tmp_reg = *new_reg;
1143 tmp_reg.mm_node = NULL;
1144 ret = ttm_bo_mem_space(bo, &placement, &tmp_reg, intr, no_wait_gpu);
Ben Skeggs6ee73862009-12-11 19:24:15 +10001145 if (ret)
1146 return ret;
1147
Ben Skeggs605f9cc2016-05-17 11:13:37 +10001148 ret = ttm_tt_bind(bo->ttm, &tmp_reg);
Ben Skeggs6ee73862009-12-11 19:24:15 +10001149 if (ret)
1150 goto out;
1151
Ben Skeggs605f9cc2016-05-17 11:13:37 +10001152 ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, &tmp_reg);
Ben Skeggs6ee73862009-12-11 19:24:15 +10001153 if (ret)
1154 goto out;
1155
Ben Skeggs605f9cc2016-05-17 11:13:37 +10001156 ret = ttm_bo_move_ttm(bo, intr, no_wait_gpu, new_reg);
Ben Skeggs6ee73862009-12-11 19:24:15 +10001157out:
Ben Skeggs605f9cc2016-05-17 11:13:37 +10001158 ttm_bo_mem_put(bo, &tmp_reg);
Ben Skeggs6ee73862009-12-11 19:24:15 +10001159 return ret;
1160}
1161
static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_gpu, struct ttm_mem_reg *new_reg)
{
	struct ttm_place placement_memtype = {
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING
	};
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_reg;
	int ret;

	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_reg = *new_reg;
	tmp_reg.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_reg, intr, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_bo_move_ttm(bo, intr, no_wait_gpu, &tmp_reg);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, new_reg);

out:
	ttm_bo_mem_put(bo, &tmp_reg);
	return ret;
}

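/*
 * Keep GPU virtual address mappings coherent with the buffer's backing
 * store: remap VMAs that can follow the buffer to its new placement,
 * and unmap the rest once the GPU is idle.
 */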
static void
nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, bool evict,
		     struct ttm_mem_reg *new_reg)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nvkm_vma *vma;

	/* ttm can now (stupidly) pass the driver bos it didn't create... */
	if (bo->destroy != nouveau_bo_del_ttm)
		return;

	list_for_each_entry(vma, &nvbo->vma_list, head) {
		if (new_reg && new_reg->mem_type != TTM_PL_SYSTEM &&
		    (new_reg->mem_type == TTM_PL_VRAM ||
		     nvbo->page_shift != vma->vm->mmu->lpg_shift)) {
			nvkm_vm_map(vma, new_reg->mm_node);
		} else {
			WARN_ON(ttm_bo_wait(bo, false, false));
			nvkm_vm_unmap(vma);
		}
	}
}

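/* Bind a tile region for the new placement on chips that need one. */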
static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_reg,
		   struct nouveau_drm_tile **new_tile)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	u64 offset = new_reg->start << PAGE_SHIFT;

	*new_tile = NULL;
	if (new_reg->mem_type != TTM_PL_VRAM)
		return 0;

	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
		*new_tile = nv10_bo_set_tiling(dev, offset, new_reg->size,
					       nvbo->tile_mode,
					       nvbo->tile_flags);
	}

	return 0;
}

static void
nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
		      struct nouveau_drm_tile *new_tile,
		      struct nouveau_drm_tile **old_tile)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct dma_fence *fence = reservation_object_get_excl(bo->resv);

	nv10_bo_put_tile_region(dev, *old_tile, fence);
	*old_tile = new_tile;
}

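/*
 * The driver's move() hook.  Tries, in order: a "fake" move (no backing
 * pages to preserve, so the ttm_mem_regs are simply swapped), the
 * hardware copy engine selected in nouveau_bo_move_init(), and finally
 * TTM's CPU memcpy fallback.  Tile regions are swapped over around the
 * move on pre-TESLA chips.
 */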
static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
		bool no_wait_gpu, struct ttm_mem_reg *new_reg)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_mem_reg *old_reg = &bo->mem;
	struct nouveau_drm_tile *new_tile = NULL;
	int ret = 0;

	ret = ttm_bo_wait(bo, intr, no_wait_gpu);
	if (ret)
		return ret;

	if (nvbo->pin_refcnt)
		NV_WARN(drm, "Moving pinned object %p!\n", nvbo);

	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
		ret = nouveau_bo_vm_bind(bo, new_reg, &new_tile);
		if (ret)
			return ret;
	}

	/* Fake bo copy. */
	if (old_reg->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
		BUG_ON(bo->mem.mm_node != NULL);
		bo->mem = *new_reg;
		new_reg->mm_node = NULL;
		goto out;
	}

	/* Hardware assisted copy. */
	if (drm->ttm.move) {
		if (new_reg->mem_type == TTM_PL_SYSTEM)
			ret = nouveau_bo_move_flipd(bo, evict, intr,
						    no_wait_gpu, new_reg);
		else if (old_reg->mem_type == TTM_PL_SYSTEM)
			ret = nouveau_bo_move_flips(bo, evict, intr,
						    no_wait_gpu, new_reg);
		else
			ret = nouveau_bo_move_m2mf(bo, evict, intr,
						   no_wait_gpu, new_reg);
		if (!ret)
			goto out;
	}

	/* Fallback to software copy. */
	ret = ttm_bo_wait(bo, intr, no_wait_gpu);
	if (ret == 0)
		ret = ttm_bo_move_memcpy(bo, intr, no_wait_gpu, new_reg);

out:
	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
		if (ret)
			nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
		else
			nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
	}

	return ret;
}

static int
nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	return drm_vma_node_verify_access(&nvbo->gem.vma_node,
					  filp->private_data);
}

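/*
 * Describe how a ttm_mem_reg can be mapped by the CPU: system memory
 * needs no aperture, AGP and untiled GART memory map through the
 * aperture directly, and VRAM maps through BAR1 (via an explicit BAR
 * VMA on TESLA and newer).
 */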
static int
nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
{
	struct ttm_mem_type_manager *man = &bdev->man[reg->mem_type];
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct nvkm_device *device = nvxx_device(&drm->client.device);
	struct nvkm_mem *mem = reg->mm_node;
	int ret;

	reg->bus.addr = NULL;
	reg->bus.offset = 0;
	reg->bus.size = reg->num_pages << PAGE_SHIFT;
	reg->bus.base = 0;
	reg->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (reg->mem_type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		return 0;
	case TTM_PL_TT:
#if IS_ENABLED(CONFIG_AGP)
		if (drm->agp.bridge) {
			reg->bus.offset = reg->start << PAGE_SHIFT;
			reg->bus.base = drm->agp.base;
			reg->bus.is_iomem = !drm->agp.cma;
		}
#endif
		if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA ||
		    !mem->memtype)
			/* untiled */
			break;
		/* fallthrough, tiled memory */
	case TTM_PL_VRAM:
		reg->bus.offset = reg->start << PAGE_SHIFT;
		reg->bus.base = device->func->resource_addr(device, 1);
		reg->bus.is_iomem = true;
		if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
			struct nvkm_bar *bar = nvxx_bar(&drm->client.device);
			int page_shift = 12;
			if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_FERMI)
				page_shift = mem->page_shift;

			ret = nvkm_bar_umap(bar, mem->size << 12, page_shift,
					    &mem->bar_vma);
			if (ret)
				return ret;

			nvkm_vm_map(&mem->bar_vma, mem);
			reg->bus.offset = mem->bar_vma.offset;
		}
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
{
	struct nvkm_mem *mem = reg->mm_node;

	if (!mem->bar_vma.node)
		return;

	nvkm_vm_unmap(&mem->bar_vma);
	nvkm_vm_put(&mem->bar_vma);
}

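/*
 * Called before a CPU fault is serviced.  Tiled buffers must not be
 * faulted from system memory, and on pre-TESLA chips VRAM buffers must
 * sit inside the BAR1-mappable window, so migrate anything that doesn't
 * already qualify before the CPU mapping is set up.
 */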
static int
nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nvkm_device *device = nvxx_device(&drm->client.device);
	u32 mappable = device->func->resource_size(device, 1) >> PAGE_SHIFT;
	int i, ret;

	/* as long as the bo isn't in vram, and isn't tiled, we've got
	 * nothing to do here.
	 */
	if (bo->mem.mem_type != TTM_PL_VRAM) {
		if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA ||
		    !nouveau_bo_tile_layout(nvbo))
			return 0;

		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
			nouveau_bo_placement_set(nvbo, TTM_PL_TT, 0);

			ret = nouveau_bo_validate(nvbo, false, false);
			if (ret)
				return ret;
		}
		return 0;
	}

	/* make sure bo is in mappable vram */
	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA ||
	    bo->mem.start + bo->mem.num_pages < mappable)
		return 0;

	for (i = 0; i < nvbo->placement.num_placement; ++i) {
		nvbo->placements[i].fpfn = 0;
		nvbo->placements[i].lpfn = mappable;
	}

	for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
		nvbo->busy_placements[i].fpfn = 0;
		nvbo->busy_placements[i].lpfn = mappable;
	}

	nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
	return nouveau_bo_validate(nvbo, false, false);
}

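/*
 * Allocate and DMA-map backing pages for a ttm_tt.  SG (prime-imported)
 * objects arrive pre-populated, and AGP/swiotlb configurations use their
 * own pools; everything else is fed from the common TTM page pool and
 * streaming-mapped page by page, unwinding the mappings on failure.
 */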
static int
nouveau_ttm_tt_populate(struct ttm_tt *ttm)
{
	struct ttm_dma_tt *ttm_dma = (void *)ttm;
	struct nouveau_drm *drm;
	struct nvkm_device *device;
	struct drm_device *dev;
	struct device *pdev;
	unsigned i;
	int r;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (ttm->state != tt_unpopulated)
		return 0;

	if (slave && ttm->sg) {
		/* make userspace faulting work */
		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
						 ttm_dma->dma_address, ttm->num_pages);
		ttm->state = tt_unbound;
		return 0;
	}

	drm = nouveau_bdev(ttm->bdev);
	device = nvxx_device(&drm->client.device);
	dev = drm->dev;
	pdev = device->dev;

#if IS_ENABLED(CONFIG_AGP)
	if (drm->agp.bridge) {
		return ttm_agp_tt_populate(ttm);
	}
#endif

#if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
	if (swiotlb_nr_tbl()) {
		return ttm_dma_populate((void *)ttm, dev->dev);
	}
#endif

	r = ttm_pool_populate(ttm);
	if (r) {
		return r;
	}

	for (i = 0; i < ttm->num_pages; i++) {
		dma_addr_t addr;

		addr = dma_map_page(pdev, ttm->pages[i], 0, PAGE_SIZE,
				    DMA_BIDIRECTIONAL);

		if (dma_mapping_error(pdev, addr)) {
			while (i--) {
				dma_unmap_page(pdev, ttm_dma->dma_address[i],
					       PAGE_SIZE, DMA_BIDIRECTIONAL);
				ttm_dma->dma_address[i] = 0;
			}
			ttm_pool_unpopulate(ttm);
			return -EFAULT;
		}

		ttm_dma->dma_address[i] = addr;
	}
	return 0;
}

static void
nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	struct ttm_dma_tt *ttm_dma = (void *)ttm;
	struct nouveau_drm *drm;
	struct nvkm_device *device;
	struct drm_device *dev;
	struct device *pdev;
	unsigned i;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (slave)
		return;

	drm = nouveau_bdev(ttm->bdev);
	device = nvxx_device(&drm->client.device);
	dev = drm->dev;
	pdev = device->dev;

#if IS_ENABLED(CONFIG_AGP)
	if (drm->agp.bridge) {
		ttm_agp_tt_unpopulate(ttm);
		return;
	}
#endif

#if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
	if (swiotlb_nr_tbl()) {
		ttm_dma_unpopulate((void *)ttm, dev->dev);
		return;
	}
#endif

	for (i = 0; i < ttm->num_pages; i++) {
		if (ttm_dma->dma_address[i]) {
			dma_unmap_page(pdev, ttm_dma->dma_address[i], PAGE_SIZE,
				       DMA_BIDIRECTIONAL);
		}
	}

	ttm_pool_unpopulate(ttm);
}

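/*
 * Attach a fence to the buffer's reservation object: exclusive for
 * writes, shared for reads.
 */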
void
nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence, bool exclusive)
{
	struct reservation_object *resv = nvbo->bo.resv;

	if (exclusive)
		reservation_object_add_excl_fence(resv, &fence->base);
	else if (fence)
		reservation_object_add_shared_fence(resv, &fence->base);
}

struct ttm_bo_driver nouveau_bo_driver = {
	.ttm_tt_create = &nouveau_ttm_tt_create,
	.ttm_tt_populate = &nouveau_ttm_tt_populate,
	.ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
	.invalidate_caches = nouveau_bo_invalidate_caches,
	.init_mem_type = nouveau_bo_init_mem_type,
	.eviction_valuable = ttm_bo_eviction_valuable,
	.evict_flags = nouveau_bo_evict_flags,
	.move_notify = nouveau_bo_move_ntfy,
	.move = nouveau_bo_move,
	.verify_access = nouveau_bo_verify_access,
	.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
	.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
	.io_mem_free = &nouveau_ttm_io_mem_free,
};

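/* Find this buffer's mapping in the given address space, if any. */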
struct nvkm_vma *
nouveau_bo_vma_find(struct nouveau_bo *nvbo, struct nvkm_vm *vm)
{
	struct nvkm_vma *vma;
	list_for_each_entry(vma, &nvbo->vma_list, head) {
		if (vma->vm == vm)
			return vma;
	}

	return NULL;
}

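/*
 * Map a buffer into an address space.  Address space is always
 * allocated, but the pages are only bound immediately when the buffer
 * is resident in VRAM, or resident and using small pages; otherwise
 * nouveau_bo_move_ntfy() maps them once backing store appears.
 *
 * A typical caller pairs this with nouveau_bo_vma_del(), roughly like
 * the sketch below (error handling elided; cli->vm stands for whichever
 * client address space the mapping belongs to):
 *
 *	struct nvkm_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
 *	ret = nouveau_bo_vma_add(nvbo, cli->vm, vma);
 *	...
 *	nouveau_bo_vma_del(nvbo, vma);
 *	kfree(vma);
 */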
int
nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nvkm_vm *vm,
		   struct nvkm_vma *vma)
{
	const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
	int ret;

	ret = nvkm_vm_get(vm, size, nvbo->page_shift,
			  NV_MEM_ACCESS_RW, vma);
	if (ret)
		return ret;

	if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM &&
	    (nvbo->bo.mem.mem_type == TTM_PL_VRAM ||
	     nvbo->page_shift != vma->vm->mmu->lpg_shift))
		nvkm_vm_map(vma, nvbo->bo.mem.mm_node);

	list_add_tail(&vma->head, &nvbo->vma_list);
	vma->refcount = 1;
	return 0;
}

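/* Tear down a mapping set up by nouveau_bo_vma_add(). */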
void
nouveau_bo_vma_del(struct nouveau_bo *nvbo, struct nvkm_vma *vma)
{
	if (vma->node) {
		if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM)
			nvkm_vm_unmap(vma);
		nvkm_vm_put(vma);
		list_del(&vma->head);
	}
}