blob: f260bf957959777cc6741518e6419c0dcaf36bc3 [file] [log] [blame]
Ben Skeggs292da612011-12-09 16:11:06 +10001/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
Rob Clarkc09dcbc2014-09-28 14:19:14 -040025#ifdef HAVE_CONFIG_H
26# include <config.h>
27#endif
28
Ben Skeggs292da612011-12-09 16:11:06 +100029#include <stdlib.h>
30#include <stdint.h>
Ben Skeggsc41b4942012-11-23 12:40:30 +100031#include <stddef.h>
Ben Skeggsf6b1b5b2015-11-24 10:10:04 +100032#include <errno.h>
Ben Skeggs292da612011-12-09 16:11:06 +100033
34#include "private.h"
35
Ben Skeggs4a3cbf52015-11-24 09:17:52 +100036#include "nvif/class.h"
Ben Skeggsc41b4942012-11-23 12:40:30 +100037
static int
abi16_chan_nv04(struct nouveau_object *obj)
{
	/* Allocate a FIFO channel through the legacy ABI16
	 * DRM_NOUVEAU_CHANNEL_ALLOC ioctl for nv04-style (pre-NVC0)
	 * chipsets, passing the client-provided VRAM/GART ctxdma handles.
	 *
	 * Returns 0 on success, or the negative error from the ioctl.
	 */
	struct nouveau_device *dev = (struct nouveau_device *)obj->parent;
	struct nv04_fifo *nv04 = obj->data;
	struct drm_nouveau_channel_alloc req = {
		.fb_ctxdma_handle = nv04->vram,
		.tt_ctxdma_handle = nv04->gart
	};
	int ret;

	ret = drmCommandWriteRead(dev->fd, DRM_NOUVEAU_CHANNEL_ALLOC,
				  &req, sizeof(req));
	if (ret)
		return ret;

	/* Copy the kernel-assigned channel state back to the caller's
	 * fifo struct; the object handle doubles as the channel id.
	 */
	nv04->base.channel = req.channel;
	nv04->base.pushbuf = req.pushbuf_domains;
	nv04->notify = req.notifier_handle;
	nv04->base.object->handle = req.channel;
	nv04->base.object->length = sizeof(*nv04);
	return 0;
}
61
Ben Skeggs4a3cbf52015-11-24 09:17:52 +100062static int
Ben Skeggs292da612011-12-09 16:11:06 +100063abi16_chan_nvc0(struct nouveau_object *obj)
64{
65 struct nouveau_device *dev = (struct nouveau_device *)obj->parent;
Ben Skeggs73b9a282012-04-17 08:35:43 +100066 struct drm_nouveau_channel_alloc req = {};
Ben Skeggs292da612011-12-09 16:11:06 +100067 struct nvc0_fifo *nvc0 = obj->data;
68 int ret;
69
70 ret = drmCommandWriteRead(dev->fd, DRM_NOUVEAU_CHANNEL_ALLOC,
71 &req, sizeof(req));
72 if (ret)
73 return ret;
74
75 nvc0->base.channel = req.channel;
76 nvc0->base.pushbuf = req.pushbuf_domains;
Christoph Bumiller754655c2012-04-19 20:03:39 +020077 nvc0->notify = req.notifier_handle;
Ben Skeggs292da612011-12-09 16:11:06 +100078 nvc0->base.object->handle = req.channel;
79 nvc0->base.object->length = sizeof(*nvc0);
80 return 0;
81}
82
static int
abi16_chan_nve0(struct nouveau_object *obj)
{
	/* Allocate a FIFO channel through the legacy ABI16
	 * DRM_NOUVEAU_CHANNEL_ALLOC ioctl for NVE0-family chipsets.
	 *
	 * Returns 0 on success, or the negative error from the ioctl.
	 */
	struct nouveau_device *dev = (struct nouveau_device *)obj->parent;
	struct drm_nouveau_channel_alloc req = {};
	struct nve0_fifo *nve0 = obj->data;
	int ret;

	/* If the caller passed the extended nve0_fifo layout (one that
	 * includes the "engine" member), request that specific engine.
	 * NOTE(review): the ABI16 encoding here appears to be
	 * fb_ctxdma_handle = ~0 with the engine id in tt_ctxdma_handle —
	 * confirm against the kernel's nouveau ABI16 channel-alloc path.
	 */
	if (obj->length > offsetof(struct nve0_fifo, engine)) {
		req.fb_ctxdma_handle = 0xffffffff;
		req.tt_ctxdma_handle = nve0->engine;
	}

	ret = drmCommandWriteRead(dev->fd, DRM_NOUVEAU_CHANNEL_ALLOC,
				  &req, sizeof(req));
	if (ret)
		return ret;

	/* Copy the kernel-assigned channel state back to the caller's
	 * fifo struct; the object handle doubles as the channel id.
	 */
	nve0->base.channel = req.channel;
	nve0->base.pushbuf = req.pushbuf_domains;
	nve0->notify = req.notifier_handle;
	nve0->base.object->handle = req.channel;
	nve0->base.object->length = sizeof(*nve0);
	return 0;
}
108
static int
abi16_engobj(struct nouveau_object *obj)
{
	/* Allocate an engine object (graphics/SW class instance) on the
	 * parent channel via the legacy ABI16 DRM_NOUVEAU_GROBJ_ALLOC
	 * ioctl.
	 *
	 * Returns 0 on success, or the negative error from the ioctl.
	 */
	struct drm_nouveau_grobj_alloc req = {
		.channel = obj->parent->handle,
		.handle = obj->handle,
		.class = obj->oclass,
	};
	struct nouveau_device *dev;
	int ret;

	/* Older kernel versions did not have the concept of nouveau-
	 * specific classes and abused some NVIDIA-assigned ones for
	 * a SW class. The ABI16 layer has compatibility in place to
	 * translate these older identifiers to the newer ones.
	 *
	 * Clients that have been updated to use NVIF are required to
	 * use the newer class identifiers, which means that they'll
	 * break if running on an older kernel.
	 *
	 * To handle this case, when using ABI16, we translate to the
	 * older values which work on any kernel.
	 */
	switch (req.class) {
	case NVIF_CLASS_SW_NV04 : req.class = 0x006e; break;
	case NVIF_CLASS_SW_NV10 : req.class = 0x016e; break;
	case NVIF_CLASS_SW_NV50 : req.class = 0x506e; break;
	case NVIF_CLASS_SW_GF100: req.class = 0x906e; break;
	default:
		break;
	}

	/* NOTE(review): the device lookup is not NULL-checked; presumably
	 * every object reachable here has a device ancestor — confirm.
	 */
	dev = nouveau_object_find(obj, NOUVEAU_DEVICE_CLASS);
	ret = drmCommandWrite(dev->fd, DRM_NOUVEAU_GROBJ_ALLOC,
			      &req, sizeof(req));
	if (ret)
		return ret;

	/* Non-zero length marks this as a legacy (ABI16) object; see
	 * abi16_object() for how this is used.
	 */
	obj->length = sizeof(struct nouveau_object *);
	return 0;
}
150
Ben Skeggs4a3cbf52015-11-24 09:17:52 +1000151static int
Ben Skeggs292da612011-12-09 16:11:06 +1000152abi16_ntfy(struct nouveau_object *obj)
153{
154 struct nv04_notify *ntfy = obj->data;
155 struct drm_nouveau_notifierobj_alloc req = {
Emil Velikova9e58802015-08-15 18:01:53 +0100156 .channel = obj->parent->handle,
157 .handle = ntfy->object->handle,
158 .size = ntfy->length,
Ben Skeggs292da612011-12-09 16:11:06 +1000159 };
160 struct nouveau_device *dev;
161 int ret;
162
163 dev = nouveau_object_find(obj, NOUVEAU_DEVICE_CLASS);
164 ret = drmCommandWriteRead(dev->fd, DRM_NOUVEAU_NOTIFIEROBJ_ALLOC,
165 &req, sizeof(req));
166 if (ret)
167 return ret;
168
169 ntfy->offset = req.offset;
170 ntfy->object->length = sizeof(*ntfy);
171 return 0;
172}
173
drm_private int
abi16_sclass(struct nouveau_object *obj, struct nouveau_sclass **psclass)
{
	/* Build a list of classes supported by the given object when
	 * running on an ABI16 (pre-NVIF) kernel.
	 *
	 * On success, *psclass points to a calloc'd array (up to 8
	 * entries) and the return value is the number of entries filled
	 * in; returns -ENOMEM on allocation failure.  Ownership of the
	 * array transfers to the caller — presumably freed by the
	 * corresponding sclass_put path; verify against callers.
	 */
	struct nouveau_sclass *sclass;
	struct nouveau_device *dev;

	if (!(sclass = calloc(8, sizeof(*sclass))))
		return -ENOMEM;
	*psclass = sclass;

	switch (obj->oclass) {
	case NOUVEAU_FIFO_CHANNEL_CLASS:
		/* Older kernel versions were exposing the wrong video engine
		 * classes on certain G98:GF100 boards. This has since been
		 * corrected, but ABI16 has compatibility in place to avoid
		 * breaking older userspace.
		 *
		 * Clients that have been updated to use NVIF are required to
		 * use the correct classes, which means that they'll break if
		 * running on an older kernel.
		 *
		 * To handle this issue, if using the older kernel interfaces,
		 * we'll magic up a list containing the vdec classes that the
		 * kernel will accept for these boards. Clients should make
		 * use of this information instead of hardcoding classes for
		 * specific chipsets.
		 */
		dev = (struct nouveau_device *)obj->parent;
		if (dev->chipset >= 0x98 &&
		    dev->chipset != 0xa0 &&
		    dev->chipset < 0xc0) {
			/* -1, -1 mark oclass-only entries (no version range). */
			*sclass++ = (struct nouveau_sclass){
				GT212_MSVLD, -1, -1
			};
			*sclass++ = (struct nouveau_sclass){
				GT212_MSPDEC, -1, -1
			};
			*sclass++ = (struct nouveau_sclass){
				GT212_MSPPP, -1, -1
			};
		}
		break;
	default:
		break;
	}

	/* Number of entries appended above. */
	return sclass - *psclass;
}
222
Emil Velikov76e97992015-03-23 21:52:00 +0000223drm_private void
Ben Skeggs4a3cbf52015-11-24 09:17:52 +1000224abi16_delete(struct nouveau_object *obj)
225{
226 struct nouveau_device *dev =
227 nouveau_object_find(obj, NOUVEAU_DEVICE_CLASS);
228 if (obj->oclass == NOUVEAU_FIFO_CHANNEL_CLASS) {
229 struct drm_nouveau_channel_free req;
230 req.channel = obj->handle;
231 drmCommandWrite(dev->fd, DRM_NOUVEAU_CHANNEL_FREE,
232 &req, sizeof(req));
233 } else {
234 struct drm_nouveau_gpuobj_free req;
235 req.channel = obj->parent->handle;
236 req.handle = obj->handle;
237 drmCommandWrite(dev->fd, DRM_NOUVEAU_GPUOBJ_FREE,
238 &req, sizeof(req));
239 }
240}
241
drm_private bool
abi16_object(struct nouveau_object *obj, int (**func)(struct nouveau_object *))
{
	/* Select the ABI16 allocation routine for a new object, based on
	 * its parent and class.
	 *
	 * On return, *func is the ABI16 constructor to use (or NULL if
	 * none applies).  A true return means ABI16 MUST be used; a false
	 * return with *func set means the caller should try NVIF first
	 * and fall back to *func.
	 */
	struct nouveau_object *parent = obj->parent;

	/* nouveau_object::length is (ab)used to determine whether the
	 * object is a legacy object (!=0), or a real NVIF object.
	 */
	if ((parent->length != 0 && parent->oclass == NOUVEAU_DEVICE_CLASS)) {
		/* Children of a legacy device: only FIFO channels are
		 * handled, with the constructor chosen per chipset family.
		 */
		if (obj->oclass == NOUVEAU_FIFO_CHANNEL_CLASS) {
			struct nouveau_device *dev = (void *)parent;
			if (dev->chipset < 0xc0)
				*func = abi16_chan_nv04;
			else
			if (dev->chipset < 0xe0)
				*func = abi16_chan_nvc0;
			else
				*func = abi16_chan_nve0;
			return true;
		}
	} else
	if ((parent->length != 0 &&
	     parent->oclass == NOUVEAU_FIFO_CHANNEL_CLASS)) {
		/* Children of a legacy channel: notifiers have their own
		 * path, anything else is a generic engine object.
		 */
		if (obj->oclass == NOUVEAU_NOTIFIER_CLASS) {
			*func = abi16_ntfy;
			return true;
		}

		*func = abi16_engobj;
		return false; /* try NVIF, if supported, before calling func */
	}

	*func = NULL;
	return false;
}
277
drm_private void
abi16_bo_info(struct nouveau_bo *bo, struct drm_nouveau_gem_info *info)
{
	/* Populate a nouveau_bo from the kernel's GEM info reply:
	 * decode domain bits into NOUVEAU_BO_* flags and unpack the
	 * per-chipset tiling/memtype encoding from tile_flags/tile_mode.
	 * This is the inverse of the encoding done in abi16_bo_init().
	 */
	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);

	nvbo->map_handle = info->map_handle;
	bo->handle = info->handle;
	bo->size = info->size;
	bo->offset = info->offset;

	bo->flags = 0;
	if (info->domain & NOUVEAU_GEM_DOMAIN_VRAM)
		bo->flags |= NOUVEAU_BO_VRAM;
	if (info->domain & NOUVEAU_GEM_DOMAIN_GART)
		bo->flags |= NOUVEAU_BO_GART;
	if (!(info->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG))
		bo->flags |= NOUVEAU_BO_CONTIG;
	if (nvbo->map_handle)
		bo->flags |= NOUVEAU_BO_MAP;

	if (bo->device->chipset >= 0xc0) {
		/* NVC0+: memtype lives in tile_flags bits 8..15. */
		bo->config.nvc0.memtype = (info->tile_flags & 0xff00) >> 8;
		bo->config.nvc0.tile_mode = info->tile_mode;
	} else
	if (bo->device->chipset >= 0x80 || bo->device->chipset == 0x50) {
		/* NV50 family: memtype is split across tile_flags bits
		 * 8..14 and 16..17 (mirrors the encoding in abi16_bo_init).
		 */
		bo->config.nv50.memtype = (info->tile_flags & 0x07f00) >> 8 |
					  (info->tile_flags & 0x30000) >> 9;
		bo->config.nv50.tile_mode = info->tile_mode << 4;
	} else {
		/* nv04-style: low bits are surface flags, tile_mode is
		 * the surface pitch.
		 */
		bo->config.nv04.surf_flags = info->tile_flags & 7;
		bo->config.nv04.surf_pitch = info->tile_mode;
	}
}
311
drm_private int
abi16_bo_init(struct nouveau_bo *bo, uint32_t alignment,
	      union nouveau_bo_config *config)
{
	/* Create the kernel GEM object backing a nouveau_bo via
	 * DRM_NOUVEAU_GEM_NEW: encode bo->flags into domain bits and the
	 * optional per-chipset config into tile_flags/tile_mode (the
	 * inverse of abi16_bo_info()), then fill the bo back in from the
	 * kernel's reply.
	 *
	 * Returns 0 on success, or the negative error from the ioctl.
	 */
	struct nouveau_device *dev = bo->device;
	struct drm_nouveau_gem_new req = {};
	struct drm_nouveau_gem_info *info = &req.info;
	int ret;

	if (bo->flags & NOUVEAU_BO_VRAM)
		info->domain |= NOUVEAU_GEM_DOMAIN_VRAM;
	if (bo->flags & NOUVEAU_BO_GART)
		info->domain |= NOUVEAU_GEM_DOMAIN_GART;
	/* No placement requested: let the kernel pick either domain. */
	if (!info->domain)
		info->domain |= NOUVEAU_GEM_DOMAIN_VRAM |
				NOUVEAU_GEM_DOMAIN_GART;

	if (bo->flags & NOUVEAU_BO_MAP)
		info->domain |= NOUVEAU_GEM_DOMAIN_MAPPABLE;

	if (bo->flags & NOUVEAU_BO_COHERENT)
		info->domain |= NOUVEAU_GEM_DOMAIN_COHERENT;

	if (!(bo->flags & NOUVEAU_BO_CONTIG))
		info->tile_flags = NOUVEAU_GEM_TILE_NONCONTIG;

	info->size = bo->size;
	req.align = alignment;

	if (config) {
		if (dev->chipset >= 0xc0) {
			/* NVC0+: memtype goes in tile_flags bits 8..15. */
			info->tile_flags = (config->nvc0.memtype & 0xff) << 8;
			info->tile_mode = config->nvc0.tile_mode;
		} else
		if (dev->chipset >= 0x80 || dev->chipset == 0x50) {
			/* NV50 family: split memtype across tile_flags bits
			 * 8..14 and 16..17 (decoded back in abi16_bo_info).
			 */
			info->tile_flags = (config->nv50.memtype & 0x07f) << 8 |
					   (config->nv50.memtype & 0x180) << 9;
			info->tile_mode = config->nv50.tile_mode >> 4;
		} else {
			/* nv04-style: surface flags/pitch mapping. */
			info->tile_flags = config->nv04.surf_flags & 7;
			info->tile_mode = config->nv04.surf_pitch;
		}
	}

	/* Older kernels only understand the memtype bits; mask off
	 * everything else (including NONCONTIG) in that case.
	 */
	if (!nouveau_device(dev)->have_bo_usage)
		info->tile_flags &= 0x0000ff00;

	ret = drmCommandWriteRead(dev->fd, DRM_NOUVEAU_GEM_NEW,
				  &req, sizeof(req));
	if (ret == 0)
		abi16_bo_info(bo, &req.info);
	return ret;
}