blob: f34175e02b2904658d39a5b222f9fa0f407ebb75 [file] [log] [blame]
/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
#include <stdlib.h>
#include <stdint.h>
#include <stddef.h>

#include "private.h"

Ben Skeggs292da612011-12-09 16:11:06 +100032int
33abi16_chan_nv04(struct nouveau_object *obj)
34{
35 struct nouveau_device *dev = (struct nouveau_device *)obj->parent;
Ben Skeggs292da612011-12-09 16:11:06 +100036 struct nv04_fifo *nv04 = obj->data;
Marcin Slusarz9e0026d2012-06-09 20:56:37 +020037 struct drm_nouveau_channel_alloc req = {nv04->vram, nv04->gart};
Ben Skeggs292da612011-12-09 16:11:06 +100038 int ret;
39
Ben Skeggs292da612011-12-09 16:11:06 +100040 ret = drmCommandWriteRead(dev->fd, DRM_NOUVEAU_CHANNEL_ALLOC,
41 &req, sizeof(req));
42 if (ret)
43 return ret;
44
45 nv04->base.channel = req.channel;
46 nv04->base.pushbuf = req.pushbuf_domains;
47 nv04->notify = req.notifier_handle;
48 nv04->base.object->handle = req.channel;
49 nv04->base.object->length = sizeof(*nv04);
50 return 0;
51}
52
53int
54abi16_chan_nvc0(struct nouveau_object *obj)
55{
56 struct nouveau_device *dev = (struct nouveau_device *)obj->parent;
Ben Skeggs73b9a282012-04-17 08:35:43 +100057 struct drm_nouveau_channel_alloc req = {};
Ben Skeggs292da612011-12-09 16:11:06 +100058 struct nvc0_fifo *nvc0 = obj->data;
59 int ret;
60
61 ret = drmCommandWriteRead(dev->fd, DRM_NOUVEAU_CHANNEL_ALLOC,
62 &req, sizeof(req));
63 if (ret)
64 return ret;
65
66 nvc0->base.channel = req.channel;
67 nvc0->base.pushbuf = req.pushbuf_domains;
Christoph Bumiller754655c2012-04-19 20:03:39 +020068 nvc0->notify = req.notifier_handle;
Ben Skeggs292da612011-12-09 16:11:06 +100069 nvc0->base.object->handle = req.channel;
70 nvc0->base.object->length = sizeof(*nvc0);
71 return 0;
72}
73
74int
Ben Skeggsc41b4942012-11-23 12:40:30 +100075abi16_chan_nve0(struct nouveau_object *obj)
76{
77 struct nouveau_device *dev = (struct nouveau_device *)obj->parent;
78 struct drm_nouveau_channel_alloc req = {};
79 struct nve0_fifo *nve0 = obj->data;
80 int ret;
81
82 if (obj->length > offsetof(struct nve0_fifo, engine)) {
83 req.fb_ctxdma_handle = 0xffffffff;
84 req.tt_ctxdma_handle = nve0->engine;
85 }
86
87 ret = drmCommandWriteRead(dev->fd, DRM_NOUVEAU_CHANNEL_ALLOC,
88 &req, sizeof(req));
89 if (ret)
90 return ret;
91
92 nve0->base.channel = req.channel;
93 nve0->base.pushbuf = req.pushbuf_domains;
94 nve0->notify = req.notifier_handle;
95 nve0->base.object->handle = req.channel;
96 nve0->base.object->length = sizeof(*nve0);
97 return 0;
98}
99
100int
Ben Skeggs292da612011-12-09 16:11:06 +1000101abi16_engobj(struct nouveau_object *obj)
102{
103 struct drm_nouveau_grobj_alloc req = {
104 obj->parent->handle, obj->handle, obj->oclass
105 };
106 struct nouveau_device *dev;
107 int ret;
108
109 dev = nouveau_object_find(obj, NOUVEAU_DEVICE_CLASS);
110 ret = drmCommandWrite(dev->fd, DRM_NOUVEAU_GROBJ_ALLOC,
111 &req, sizeof(req));
112 if (ret)
113 return ret;
114
115 obj->length = sizeof(struct nouveau_object *);
116 return 0;
117}
118
119int
120abi16_ntfy(struct nouveau_object *obj)
121{
122 struct nv04_notify *ntfy = obj->data;
123 struct drm_nouveau_notifierobj_alloc req = {
124 obj->parent->handle, ntfy->object->handle, ntfy->length
125 };
126 struct nouveau_device *dev;
127 int ret;
128
129 dev = nouveau_object_find(obj, NOUVEAU_DEVICE_CLASS);
130 ret = drmCommandWriteRead(dev->fd, DRM_NOUVEAU_NOTIFIEROBJ_ALLOC,
131 &req, sizeof(req));
132 if (ret)
133 return ret;
134
135 ntfy->offset = req.offset;
136 ntfy->object->length = sizeof(*ntfy);
137 return 0;
138}
139
/* Translate the kernel's drm_nouveau_gem_info for a buffer object into
 * the libdrm-facing nouveau_bo fields: handle/size/offset, placement
 * and mapping flags, and the per-generation tiling configuration.
 *
 * This is the exact inverse of the encoding done in abi16_bo_init();
 * the two must stay in sync.
 */
void
abi16_bo_info(struct nouveau_bo *bo, struct drm_nouveau_gem_info *info)
{
	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);

	nvbo->map_handle = info->map_handle;
	bo->handle = info->handle;
	bo->size = info->size;
	bo->offset = info->offset;

	/* Rebuild flags from scratch out of the kernel-reported domain
	 * and tile flags. */
	bo->flags = 0;
	if (info->domain & NOUVEAU_GEM_DOMAIN_VRAM)
		bo->flags |= NOUVEAU_BO_VRAM;
	if (info->domain & NOUVEAU_GEM_DOMAIN_GART)
		bo->flags |= NOUVEAU_BO_GART;
	if (!(info->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG))
		bo->flags |= NOUVEAU_BO_CONTIG;
	if (nvbo->map_handle)
		bo->flags |= NOUVEAU_BO_MAP;

	/* Decode the tiling/memtype union per GPU generation.  The bit
	 * layouts below mirror the packing in abi16_bo_init(). */
	if (bo->device->chipset >= 0xc0) {
		/* presumably Fermi and newer: memtype lives in bits 8..15
		 * of tile_flags -- layout inherited from the ABI16 kernel
		 * interface */
		bo->config.nvc0.memtype = (info->tile_flags & 0xff00) >> 8;
		bo->config.nvc0.tile_mode = info->tile_mode;
	} else
	if (bo->device->chipset >= 0x80 || bo->device->chipset == 0x50) {
		/* nv50 family: memtype is split across two bit ranges of
		 * tile_flags, and tile_mode is stored shifted down 4 */
		bo->config.nv50.memtype = (info->tile_flags & 0x07f00) >> 8 |
					  (info->tile_flags & 0x30000) >> 9;
		bo->config.nv50.tile_mode = info->tile_mode << 4;
	} else {
		/* pre-nv50: low 3 bits are surface flags, tile_mode is
		 * reused as the surface pitch */
		bo->config.nv04.surf_flags = info->tile_flags & 7;
		bo->config.nv04.surf_pitch = info->tile_mode;
	}
}
173
174int
175abi16_bo_init(struct nouveau_bo *bo, uint32_t alignment,
176 union nouveau_bo_config *config)
177{
178 struct nouveau_device *dev = bo->device;
179 struct drm_nouveau_gem_new req = {};
180 struct drm_nouveau_gem_info *info = &req.info;
181 int ret;
182
183 if (bo->flags & NOUVEAU_BO_VRAM)
184 info->domain |= NOUVEAU_GEM_DOMAIN_VRAM;
185 if (bo->flags & NOUVEAU_BO_GART)
186 info->domain |= NOUVEAU_GEM_DOMAIN_GART;
187 if (!info->domain)
188 info->domain |= NOUVEAU_GEM_DOMAIN_VRAM |
189 NOUVEAU_GEM_DOMAIN_GART;
190
191 if (bo->flags & NOUVEAU_BO_MAP)
192 info->domain |= NOUVEAU_GEM_DOMAIN_MAPPABLE;
193
194 if (!(bo->flags & NOUVEAU_BO_CONTIG))
195 info->tile_flags = NOUVEAU_GEM_TILE_NONCONTIG;
196
197 info->size = bo->size;
198 req.align = alignment;
199
200 if (config) {
201 if (dev->chipset >= 0xc0) {
202 info->tile_flags = (config->nvc0.memtype & 0xff) << 8;
203 info->tile_mode = config->nvc0.tile_mode;
204 } else
205 if (dev->chipset >= 0x80 || dev->chipset == 0x50) {
206 info->tile_flags = (config->nv50.memtype & 0x07f) << 8 |
207 (config->nv50.memtype & 0x180) << 9;
208 info->tile_mode = config->nv50.tile_mode >> 4;
209 } else {
210 info->tile_flags = config->nv04.surf_flags & 7;
211 info->tile_mode = config->nv04.surf_pitch;
212 }
213 }
214
215 if (!nouveau_device(dev)->have_bo_usage)
216 info->tile_flags &= 0x0000ff00;
217
218 ret = drmCommandWriteRead(dev->fd, DRM_NOUVEAU_GEM_NEW,
219 &req, sizeof(req));
220 if (ret == 0)
221 abi16_bo_info(bo, &req.info);
222 return ret;
223}