/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#ifdef HAVE_CONFIG_H
# include <config.h>
#endif

#include <stdlib.h>
#include <stdint.h>
#include <stddef.h>
#include <errno.h>

#include "private.h"

#include "nvif/class.h"

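/* Allocate a FIFO channel via the legacy DRM_NOUVEAU_CHANNEL_ALLOC ioctl
 * for pre-NVC0 (chipset < 0xc0) devices.  The caller provides VRAM/GART
 * ctxdma handles in struct nv04_fifo; the kernel's reply (channel id,
 * pushbuf domains, notifier handle) is copied back into that structure.
 */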
static int
abi16_chan_nv04(struct nouveau_object *obj)
{
	struct nouveau_drm *drm = nouveau_drm(obj);
	struct nv04_fifo *nv04 = obj->data;
	struct drm_nouveau_channel_alloc req = {
		.fb_ctxdma_handle = nv04->vram,
		.tt_ctxdma_handle = nv04->gart
	};
	int ret;

	ret = drmCommandWriteRead(drm->fd, DRM_NOUVEAU_CHANNEL_ALLOC,
				  &req, sizeof(req));
	if (ret)
		return ret;

	nv04->base.channel = req.channel;
	nv04->base.pushbuf = req.pushbuf_domains;
	nv04->notify = req.notifier_handle;
	nv04->base.object->handle = req.channel;
	nv04->base.object->length = sizeof(*nv04);
	return 0;
}

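/* Allocate a FIFO channel for NVC0/Fermi-generation devices
 * (0xc0 <= chipset < 0xe0).  No ctxdma handles are passed; the request
 * is left zero-initialised.
 */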
static int
abi16_chan_nvc0(struct nouveau_object *obj)
{
	struct nouveau_drm *drm = nouveau_drm(obj);
	struct drm_nouveau_channel_alloc req = {};
	struct nvc0_fifo *nvc0 = obj->data;
	int ret;

	ret = drmCommandWriteRead(drm->fd, DRM_NOUVEAU_CHANNEL_ALLOC,
				  &req, sizeof(req));
	if (ret)
		return ret;

	nvc0->base.channel = req.channel;
	nvc0->base.pushbuf = req.pushbuf_domains;
	nvc0->notify = req.notifier_handle;
	nvc0->base.object->handle = req.channel;
	nvc0->base.object->length = sizeof(*nvc0);
	return 0;
}

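/* Allocate a FIFO channel for NVE0 (Kepler) and newer devices.  When the
 * caller's struct nve0_fifo is large enough to contain the engine field,
 * the engine selection is passed through the ctxdma fields;
 * fb_ctxdma_handle = 0xffffffff appears to flag the request as an engine
 * selection rather than a real ctxdma handle.
 */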
static int
abi16_chan_nve0(struct nouveau_object *obj)
{
	struct nouveau_drm *drm = nouveau_drm(obj);
	struct drm_nouveau_channel_alloc req = {};
	struct nve0_fifo *nve0 = obj->data;
	int ret;

	if (obj->length > offsetof(struct nve0_fifo, engine)) {
		req.fb_ctxdma_handle = 0xffffffff;
		req.tt_ctxdma_handle = nve0->engine;
	}

	ret = drmCommandWriteRead(drm->fd, DRM_NOUVEAU_CHANNEL_ALLOC,
				  &req, sizeof(req));
	if (ret)
		return ret;

	nve0->base.channel = req.channel;
	nve0->base.pushbuf = req.pushbuf_domains;
	nve0->notify = req.notifier_handle;
	nve0->base.object->handle = req.channel;
	nve0->base.object->length = sizeof(*nve0);
	return 0;
}

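/* Allocate an engine object on the parent channel via
 * DRM_NOUVEAU_GROBJ_ALLOC.  The comment inside covers the translation of
 * NVIF software classes back to the legacy identifiers that older kernels
 * expect.
 */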
static int
abi16_engobj(struct nouveau_object *obj)
{
	struct nouveau_drm *drm = nouveau_drm(obj);
	struct drm_nouveau_grobj_alloc req = {
		.channel = obj->parent->handle,
		.handle = obj->handle,
		.class = obj->oclass,
	};
	int ret;

	/* Older kernel versions did not have the concept of nouveau-
	 * specific classes and abused some NVIDIA-assigned ones for
	 * a SW class.  The ABI16 layer has compatibility in place to
	 * translate these older identifiers to the newer ones.
	 *
	 * Clients that have been updated to use NVIF are required to
	 * use the newer class identifiers, which means that they'll
	 * break if running on an older kernel.
	 *
	 * To handle this case, when using ABI16, we translate to the
	 * older values which work on any kernel.
	 */
	switch (req.class) {
	case NVIF_CLASS_SW_NV04 : req.class = 0x006e; break;
	case NVIF_CLASS_SW_NV10 : req.class = 0x016e; break;
	case NVIF_CLASS_SW_NV50 : req.class = 0x506e; break;
	case NVIF_CLASS_SW_GF100: req.class = 0x906e; break;
	default:
		break;
	}

	ret = drmCommandWrite(drm->fd, DRM_NOUVEAU_GROBJ_ALLOC,
			      &req, sizeof(req));
	if (ret)
		return ret;

	obj->length = sizeof(struct nouveau_object *);
	return 0;
}

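/* Allocate a notifier object on the parent channel via
 * DRM_NOUVEAU_NOTIFIEROBJ_ALLOC and record the offset the kernel assigned
 * within the channel's notifier block.
 */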
static int
abi16_ntfy(struct nouveau_object *obj)
{
	struct nouveau_drm *drm = nouveau_drm(obj);
	struct nv04_notify *ntfy = obj->data;
	struct drm_nouveau_notifierobj_alloc req = {
		.channel = obj->parent->handle,
		.handle = ntfy->object->handle,
		.size = ntfy->length,
	};
	int ret;

	ret = drmCommandWriteRead(drm->fd, DRM_NOUVEAU_NOTIFIEROBJ_ALLOC,
				  &req, sizeof(req));
	if (ret)
		return ret;

	ntfy->offset = req.offset;
	ntfy->object->length = sizeof(*ntfy);
	return 0;
}

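/* Build the list of object classes ABI16 can expose for the given object.
 * Returns the number of entries written to *psclass; the caller owns (and
 * must eventually free) the allocation, which is currently capped at
 * eight entries.
 */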
drm_private int
abi16_sclass(struct nouveau_object *obj, struct nouveau_sclass **psclass)
{
	struct nouveau_sclass *sclass;
	struct nouveau_device *dev;

	if (!(sclass = calloc(8, sizeof(*sclass))))
		return -ENOMEM;
	*psclass = sclass;

	switch (obj->oclass) {
	case NOUVEAU_FIFO_CHANNEL_CLASS:
		/* Older kernel versions were exposing the wrong video engine
		 * classes on certain G98:GF100 boards.  This has since been
		 * corrected, but ABI16 has compatibility in place to avoid
		 * breaking older userspace.
		 *
		 * Clients that have been updated to use NVIF are required to
		 * use the correct classes, which means that they'll break if
		 * running on an older kernel.
		 *
		 * To handle this issue, if using the older kernel interfaces,
		 * we'll magic up a list containing the vdec classes that the
		 * kernel will accept for these boards.  Clients should make
		 * use of this information instead of hardcoding classes for
		 * specific chipsets.
		 */
		dev = (struct nouveau_device *)obj->parent;
		if (dev->chipset >= 0x98 &&
		    dev->chipset != 0xa0 &&
		    dev->chipset < 0xc0) {
			*sclass++ = (struct nouveau_sclass){
				GT212_MSVLD, -1, -1
			};
			*sclass++ = (struct nouveau_sclass){
				GT212_MSPDEC, -1, -1
			};
			*sclass++ = (struct nouveau_sclass){
				GT212_MSPPP, -1, -1
			};
		}
		break;
	default:
		break;
	}

	return sclass - *psclass;
}

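/* Destroy an object that was created through ABI16: channels are released
 * with DRM_NOUVEAU_CHANNEL_FREE, everything else with
 * DRM_NOUVEAU_GPUOBJ_FREE.
 */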
drm_private void
abi16_delete(struct nouveau_object *obj)
{
	struct nouveau_drm *drm = nouveau_drm(obj);
	if (obj->oclass == NOUVEAU_FIFO_CHANNEL_CLASS) {
		struct drm_nouveau_channel_free req;
		req.channel = obj->handle;
		drmCommandWrite(drm->fd, DRM_NOUVEAU_CHANNEL_FREE,
				&req, sizeof(req));
	} else {
		struct drm_nouveau_gpuobj_free req;
		req.channel = obj->parent->handle;
		req.handle = obj->handle;
		drmCommandWrite(drm->fd, DRM_NOUVEAU_GPUOBJ_FREE,
				&req, sizeof(req));
	}
}

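/* Decide whether the given object can (or must) be created through the
 * ABI16 interfaces, and select the matching allocation helper above.
 * A return value of true means ABI16 must be used; false with *func set
 * means NVIF should be tried first, falling back to *func.
 *
 * A hypothetical caller (names illustrative only, not the library's
 * actual code) might drive it roughly like this:
 *
 *	int (*func)(struct nouveau_object *);
 *	int ret = -ENODEV;
 *	bool abi16_only = abi16_object(obj, &func);
 *	if (!abi16_only && have_nvif)
 *		ret = try_nvif_new(obj);
 *	if (ret && func)
 *		ret = func(obj);
 */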
drm_private bool
abi16_object(struct nouveau_object *obj, int (**func)(struct nouveau_object *))
{
	struct nouveau_object *parent = obj->parent;

	/* nouveau_object::length is (ab)used to determine whether the
	 * object is a legacy object (!=0), or a real NVIF object.
	 */
	if ((parent->length != 0 && parent->oclass == NOUVEAU_DEVICE_CLASS)) {
		if (obj->oclass == NOUVEAU_FIFO_CHANNEL_CLASS) {
			struct nouveau_device *dev = (void *)parent;
			if (dev->chipset < 0xc0)
				*func = abi16_chan_nv04;
			else
			if (dev->chipset < 0xe0)
				*func = abi16_chan_nvc0;
			else
				*func = abi16_chan_nve0;
			return true;
		}
	} else
	if ((parent->length != 0 &&
	     parent->oclass == NOUVEAU_FIFO_CHANNEL_CLASS)) {
		if (obj->oclass == NOUVEAU_NOTIFIER_CLASS) {
			*func = abi16_ntfy;
			return true;
		}

		*func = abi16_engobj;
		return false; /* try NVIF, if supported, before calling func */
	}

	*func = NULL;
	return false;
}

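/* Translate the kernel's GEM info for a buffer object into the
 * libdrm_nouveau view of it: handle, size, offset, placement flags and
 * the per-generation tiling/memtype configuration.
 */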
drm_private void
abi16_bo_info(struct nouveau_bo *bo, struct drm_nouveau_gem_info *info)
{
	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);

	nvbo->map_handle = info->map_handle;
	bo->handle = info->handle;
	bo->size = info->size;
	bo->offset = info->offset;

	bo->flags = 0;
	if (info->domain & NOUVEAU_GEM_DOMAIN_VRAM)
		bo->flags |= NOUVEAU_BO_VRAM;
	if (info->domain & NOUVEAU_GEM_DOMAIN_GART)
		bo->flags |= NOUVEAU_BO_GART;
	if (!(info->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG))
		bo->flags |= NOUVEAU_BO_CONTIG;
	if (nvbo->map_handle)
		bo->flags |= NOUVEAU_BO_MAP;

	if (bo->device->chipset >= 0xc0) {
		bo->config.nvc0.memtype = (info->tile_flags & 0xff00) >> 8;
		bo->config.nvc0.tile_mode = info->tile_mode;
	} else
	if (bo->device->chipset >= 0x80 || bo->device->chipset == 0x50) {
		bo->config.nv50.memtype = (info->tile_flags & 0x07f00) >> 8 |
					  (info->tile_flags & 0x30000) >> 9;
		bo->config.nv50.tile_mode = info->tile_mode << 4;
	} else {
		bo->config.nv04.surf_flags = info->tile_flags & 7;
		bo->config.nv04.surf_pitch = info->tile_mode;
	}
}

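/* Create the GEM object backing a nouveau_bo via DRM_NOUVEAU_GEM_NEW,
 * converting the bo's placement flags and optional per-generation config
 * into the domain/tile_flags/tile_mode values the legacy interface
 * expects (the inverse of the unpacking done in abi16_bo_info() above).
 */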
drm_private int
abi16_bo_init(struct nouveau_bo *bo, uint32_t alignment,
	      union nouveau_bo_config *config)
{
	struct nouveau_device *dev = bo->device;
	struct nouveau_drm *drm = nouveau_drm(&dev->object);
	struct drm_nouveau_gem_new req = {};
	struct drm_nouveau_gem_info *info = &req.info;
	int ret;

	if (bo->flags & NOUVEAU_BO_VRAM)
		info->domain |= NOUVEAU_GEM_DOMAIN_VRAM;
	if (bo->flags & NOUVEAU_BO_GART)
		info->domain |= NOUVEAU_GEM_DOMAIN_GART;
	if (!info->domain)
		info->domain |= NOUVEAU_GEM_DOMAIN_VRAM |
				NOUVEAU_GEM_DOMAIN_GART;

	if (bo->flags & NOUVEAU_BO_MAP)
		info->domain |= NOUVEAU_GEM_DOMAIN_MAPPABLE;

	if (bo->flags & NOUVEAU_BO_COHERENT)
		info->domain |= NOUVEAU_GEM_DOMAIN_COHERENT;

	if (!(bo->flags & NOUVEAU_BO_CONTIG))
		info->tile_flags = NOUVEAU_GEM_TILE_NONCONTIG;

	info->size = bo->size;
	req.align = alignment;

	if (config) {
		if (dev->chipset >= 0xc0) {
			info->tile_flags = (config->nvc0.memtype & 0xff) << 8;
			info->tile_mode = config->nvc0.tile_mode;
		} else
		if (dev->chipset >= 0x80 || dev->chipset == 0x50) {
			info->tile_flags = (config->nv50.memtype & 0x07f) << 8 |
					   (config->nv50.memtype & 0x180) << 9;
			info->tile_mode = config->nv50.tile_mode >> 4;
		} else {
			info->tile_flags = config->nv04.surf_flags & 7;
			info->tile_mode = config->nv04.surf_pitch;
		}
	}

	if (!nouveau_device(dev)->have_bo_usage)
		info->tile_flags &= 0x0000ff00;

	ret = drmCommandWriteRead(drm->fd, DRM_NOUVEAU_GEM_NEW,
				  &req, sizeof(req));
	if (ret == 0)
		abi16_bo_info(bo, &req.info);
	return ret;
}