/*
 * Copyright 2017 Advanced Micro Devices. All rights reserved.
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifdef DRV_AMDGPU

#include <assert.h>
#include <dlfcn.h>
#include <errno.h>
#include <fcntl.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>
#include <xf86drm.h>

#include "dri.h"
#include "drv_priv.h"
#include "helpers.h"
#include "util.h"

static const struct {
	uint32_t drm_format;
	int dri_image_format;
} drm_to_dri_image_formats[] = {
	{ DRM_FORMAT_R8, __DRI_IMAGE_FORMAT_R8 },
	{ DRM_FORMAT_GR88, __DRI_IMAGE_FORMAT_GR88 },
	{ DRM_FORMAT_RGB565, __DRI_IMAGE_FORMAT_RGB565 },
	{ DRM_FORMAT_XRGB8888, __DRI_IMAGE_FORMAT_XRGB8888 },
	{ DRM_FORMAT_ARGB8888, __DRI_IMAGE_FORMAT_ARGB8888 },
	{ DRM_FORMAT_XBGR8888, __DRI_IMAGE_FORMAT_XBGR8888 },
	{ DRM_FORMAT_ABGR8888, __DRI_IMAGE_FORMAT_ABGR8888 },
	{ DRM_FORMAT_XRGB2101010, __DRI_IMAGE_FORMAT_XRGB2101010 },
	{ DRM_FORMAT_ARGB2101010, __DRI_IMAGE_FORMAT_ARGB2101010 },
};

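/* Translate a DRM fourcc into the matching DRI image format, or 0 if the table has no entry. */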
static int drm_format_to_dri_format(uint32_t drm_format)
{
	uint32_t i;
	for (i = 0; i < ARRAY_SIZE(drm_to_dri_image_formats); i++) {
		if (drm_to_dri_image_formats[i].drm_format == drm_format)
			return drm_to_dri_image_formats[i].dri_image_format;
	}

	return 0;
}

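/*
 * Walk a NULL-terminated DRI extension list and return the first extension that
 * matches the requested name and is at least min_version, if any.
 */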
static bool lookup_extension(const __DRIextension *const *extensions, const char *name,
			     int min_version, const __DRIextension **dst)
{
	while (*extensions) {
		if ((*extensions)->name && !strcmp((*extensions)->name, name) &&
		    (*extensions)->version >= min_version) {
			*dst = *extensions;
			return true;
		}

		extensions++;
	}

	return false;
}

/*
 * The DRI GEM namespace may be different from the minigbm driver's GEM namespace. We have
 * to import the buffer into minigbm's namespace.
 */
static int import_into_minigbm(struct dri_driver *dri, struct bo *bo)
{
	uint32_t handle;
	int prime_fd, ret;

	if (!dri->image_extension->queryImage(bo->priv, __DRI_IMAGE_ATTRIB_FD, &prime_fd))
		return -errno;

	ret = drmPrimeFDToHandle(bo->drv->fd, prime_fd, &handle);
	if (ret) {
		drv_log("drmPrimeFDToHandle failed with %s\n", strerror(errno));
		/* Don't leak the exported prime fd on failure. */
		close(prime_fd);
		return ret;
	}

	bo->handles[0].u32 = handle;
	close(prime_fd);
	return 0;
}

/*
 * Close a GEM handle.
 */
static void close_gem_handle(uint32_t handle, int fd)
{
	struct drm_gem_close gem_close;
	int ret = 0;

	memset(&gem_close, 0, sizeof(gem_close));
	gem_close.handle = handle;
	ret = drmIoctl(fd, DRM_IOCTL_GEM_CLOSE, &gem_close);
	if (ret)
		drv_log("DRM_IOCTL_GEM_CLOSE failed (handle=%x) error %d\n", handle, ret);
}

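/*
 * Illustrative sketch (not taken from a specific backend): a driver that uses
 * these helpers sets drv->priv to a structure that derives from dri_driver and
 * then hands control here. The DRI shared-object path and driver suffix below
 * are examples only.
 *
 *	struct dri_driver *dri = calloc(1, sizeof(*dri));
 *	if (!dri)
 *		return -ENOMEM;
 *	drv->priv = dri;
 *	if (dri_init(drv, "/usr/lib64/dri/radeonsi_dri.so", "radeonsi")) {
 *		free(drv->priv);
 *		drv->priv = NULL;
 *		return -ENODEV;
 *	}
 *
 * On teardown, call dri_close(drv) before freeing drv->priv.
 */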
/*
 * The caller is responsible for setting drv->priv to a structure that derives from dri_driver.
 */
int dri_init(struct driver *drv, const char *dri_so_path, const char *driver_suffix)
{
	char fname[128];
	char *node_name;
	const __DRIextension **(*get_extensions)();
	const __DRIextension *loader_extensions[] = { NULL };

	struct dri_driver *dri = drv->priv;

	node_name = drmGetRenderDeviceNameFromFd(drv_get_fd(drv));
	if (!node_name)
		return -ENODEV;

	dri->fd = open(node_name, O_RDWR);
	free(node_name);
	if (dri->fd < 0)
		return -ENODEV;

	dri->driver_handle = dlopen(dri_so_path, RTLD_NOW | RTLD_GLOBAL);
	if (!dri->driver_handle)
		goto close_dri_fd;

	snprintf(fname, sizeof(fname), __DRI_DRIVER_GET_EXTENSIONS "_%s", driver_suffix);
	get_extensions = dlsym(dri->driver_handle, fname);
	if (!get_extensions)
		goto free_handle;

	dri->extensions = get_extensions();
	if (!dri->extensions)
		goto free_handle;

	if (!lookup_extension(dri->extensions, __DRI_CORE, 2,
			      (const __DRIextension **)&dri->core_extension))
		goto free_handle;

	/* Version 4 for createNewScreen2 */
	if (!lookup_extension(dri->extensions, __DRI_DRI2, 4,
			      (const __DRIextension **)&dri->dri2_extension))
		goto free_handle;

	dri->device = dri->dri2_extension->createNewScreen2(0, dri->fd, loader_extensions,
							     dri->extensions, &dri->configs, NULL);
	if (!dri->device)
		goto free_handle;

	dri->context =
	    dri->dri2_extension->createNewContext(dri->device, *dri->configs, NULL, NULL);

	if (!dri->context)
		goto free_screen;

	if (!lookup_extension(dri->core_extension->getExtensions(dri->device), __DRI_IMAGE, 12,
			      (const __DRIextension **)&dri->image_extension))
		goto free_context;

	if (!lookup_extension(dri->core_extension->getExtensions(dri->device), __DRI2_FLUSH, 4,
			      (const __DRIextension **)&dri->flush_extension))
		goto free_context;

	return 0;

free_context:
	dri->core_extension->destroyContext(dri->context);
free_screen:
	dri->core_extension->destroyScreen(dri->device);
free_handle:
	dlclose(dri->driver_handle);
	dri->driver_handle = NULL;
close_dri_fd:
	close(dri->fd);
	return -ENODEV;
}

/*
 * The caller is responsible for freeing drv->priv.
 */
void dri_close(struct driver *drv)
{
	struct dri_driver *dri = drv->priv;

	dri->core_extension->destroyContext(dri->context);
	dri->core_extension->destroyScreen(dri->device);
	dlclose(dri->driver_handle);
	dri->driver_handle = NULL;
	close(dri->fd);
}

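/*
 * Allocate a single-plane buffer through the DRI image extension and import the
 * resulting dma-buf into minigbm's GEM namespace.
 */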
int dri_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
		  uint64_t use_flags)
{
	unsigned int dri_use;
	int ret, dri_format, stride, offset;
	int modifier_upper, modifier_lower;
	struct dri_driver *dri = bo->drv->priv;

	assert(bo->meta.num_planes == 1);
	dri_format = drm_format_to_dri_format(format);

	/* Gallium drivers require shared to get the handle and stride. */
	dri_use = __DRI_IMAGE_USE_SHARE;
	if (use_flags & BO_USE_SCANOUT)
		dri_use |= __DRI_IMAGE_USE_SCANOUT;
	if (use_flags & BO_USE_CURSOR)
		dri_use |= __DRI_IMAGE_USE_CURSOR;
	if (use_flags & BO_USE_LINEAR)
		dri_use |= __DRI_IMAGE_USE_LINEAR;

	bo->priv = dri->image_extension->createImage(dri->device, width, height, dri_format,
						     dri_use, NULL);
	if (!bo->priv) {
		ret = -errno;
		return ret;
	}

	ret = import_into_minigbm(dri, bo);
	if (ret)
		goto free_image;

	if (!dri->image_extension->queryImage(bo->priv, __DRI_IMAGE_ATTRIB_STRIDE, &stride)) {
		ret = -errno;
		goto close_handle;
	}

	if (!dri->image_extension->queryImage(bo->priv, __DRI_IMAGE_ATTRIB_OFFSET, &offset)) {
		ret = -errno;
		goto close_handle;
	}

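	/*
	 * The DRI image extension reports the 64-bit DRM format modifier as two
	 * 32-bit attributes; recombine them here, or record DRM_FORMAT_MOD_INVALID
	 * when the driver does not expose a modifier.
	 */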
	if (dri->image_extension->queryImage(bo->priv, __DRI_IMAGE_ATTRIB_MODIFIER_UPPER,
					     &modifier_upper) &&
	    dri->image_extension->queryImage(bo->priv, __DRI_IMAGE_ATTRIB_MODIFIER_LOWER,
					     &modifier_lower)) {
		bo->meta.format_modifiers[0] =
		    ((uint64_t)modifier_upper << 32) | (uint32_t)modifier_lower;
	} else {
		bo->meta.format_modifiers[0] = DRM_FORMAT_MOD_INVALID;
	}

	bo->meta.strides[0] = stride;
	bo->meta.sizes[0] = stride * height;
	bo->meta.offsets[0] = offset;
	bo->meta.total_size = offset + bo->meta.sizes[0];
	return 0;

close_handle:
	close_gem_handle(bo->handles[0].u32, bo->drv->fd);
free_image:
	dri->image_extension->destroyImage(bo->priv);
	return ret;
}

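/*
 * Wrap an externally allocated buffer (described by drv_import_fd_data) in a DRI
 * image and register its GEM handle with minigbm. Only single-plane buffers are
 * handled here.
 */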
int dri_bo_import(struct bo *bo, struct drv_import_fd_data *data)
{
	int ret;
	struct dri_driver *dri = bo->drv->priv;

	assert(bo->meta.num_planes == 1);

	// clang-format off
	bo->priv = dri->image_extension->createImageFromFds(dri->device, data->width, data->height,
							     data->format, data->fds,
							     bo->meta.num_planes,
							     (int *)data->strides,
							     (int *)data->offsets, NULL);
	// clang-format on
	if (!bo->priv)
		return -errno;

	ret = import_into_minigbm(dri, bo);
	if (ret) {
		dri->image_extension->destroyImage(bo->priv);
		return ret;
	}

	return 0;
}

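/* Release both the minigbm GEM handle and the backing DRI image. */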
int dri_bo_destroy(struct bo *bo)
{
	struct dri_driver *dri = bo->drv->priv;

	assert(bo->priv);
	close_gem_handle(bo->handles[0].u32, bo->drv->fd);
	dri->image_extension->destroyImage(bo->priv);
	bo->priv = NULL;
	return 0;
}

/*
 * Map an image plane.
 *
 * This relies on the underlying driver to do a decompression and/or de-tiling
 * blit if necessary.
 *
 * This function itself is not thread-safe; we rely on the fact that the caller
 * locks a per-driver mutex.
 */
void *dri_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags)
{
	struct dri_driver *dri = bo->drv->priv;

	/* GBM flags and DRI flags are the same. */
	vma->addr = dri->image_extension->mapImage(dri->context, bo->priv, 0, 0, bo->meta.width,
						   bo->meta.height, map_flags,
						   (int *)&vma->map_strides[plane], &vma->priv);
	if (!vma->addr)
		return MAP_FAILED;

	return vma->addr;
}

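/* Unmap a previously mapped plane and flush any operations queued on the mapping context. */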
int dri_bo_unmap(struct bo *bo, struct vma *vma)
{
	struct dri_driver *dri = bo->drv->priv;

	assert(vma->priv);
	dri->image_extension->unmapImage(dri->context, bo->priv, vma->priv);

	/*
	 * From gbm_dri.c in Mesa:
	 *
	 * "Not all DRI drivers use direct maps. They may queue up DMA operations
	 * on the mapping context. Since there is no explicit gbm flush mechanism,
	 * we need to flush here."
	 */

	dri->flush_extension->flush_with_flags(dri->context, NULL, __DRI2_FLUSH_CONTEXT, 0);
	return 0;
}

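/*
 * Report how many planes a format/modifier combination uses, falling back to the
 * plane count implied by the format alone when the DRI driver cannot answer.
 */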
size_t dri_num_planes_from_modifier(struct driver *drv, uint32_t format, uint64_t modifier)
{
	struct dri_driver *dri = drv->priv;
	if (!dri->image_extension->queryDmaBufFormatModifierAttribs) {
		/* We do not do any modifier checks here. The create will fail
		 * later if the modifier is not supported. */
		return drv_num_planes_from_format(format);
	}

	uint64_t planes;
	GLboolean ret = dri->image_extension->queryDmaBufFormatModifierAttribs(
	    dri->device, format, modifier, __DRI_IMAGE_ATTRIB_NUM_PLANES, &planes);
	if (!ret)
		return 0;

	return planes;
}

#endif