/*
 * Copyright 2016 The Chromium OS Authors. All rights reserved.
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */
#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <pthread.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <unistd.h>
#include <xf86drm.h>

#ifdef __ANDROID__
#include <cutils/log.h>
#include <libgen.h>
#endif

#include "drv_helpers.h"
#include "drv_priv.h"
#include "util.h"

#ifdef DRV_AMDGPU
extern const struct backend backend_amdgpu;
#endif
#ifdef DRV_I915
extern const struct backend backend_i915;
#endif
#ifdef DRV_MEDIATEK
extern const struct backend backend_mediatek;
#endif
#ifdef DRV_MSM
extern const struct backend backend_msm;
#endif
#ifdef DRV_ROCKCHIP
extern const struct backend backend_rockchip;
#endif
#ifdef DRV_VC4
extern const struct backend backend_vc4;
#endif

// Dumb / generic drivers
extern const struct backend backend_evdi;
extern const struct backend backend_marvell;
extern const struct backend backend_meson;
extern const struct backend backend_nouveau;
extern const struct backend backend_komeda;
extern const struct backend backend_radeon;
extern const struct backend backend_synaptics;
extern const struct backend backend_virtgpu;
extern const struct backend backend_udl;
extern const struct backend backend_vkms;

static const struct backend *drv_get_backend(int fd)
{
	drmVersionPtr drm_version;
	unsigned int i;

	drm_version = drmGetVersion(fd);

	if (!drm_version)
		return NULL;

	const struct backend *backend_list[] = {
#ifdef DRV_AMDGPU
		&backend_amdgpu,
#endif
#ifdef DRV_I915
		&backend_i915,
#endif
#ifdef DRV_MEDIATEK
		&backend_mediatek,
#endif
#ifdef DRV_MSM
		&backend_msm,
#endif
#ifdef DRV_ROCKCHIP
		&backend_rockchip,
#endif
#ifdef DRV_VC4
		&backend_vc4,
#endif
		&backend_evdi,	 &backend_marvell, &backend_meson,	&backend_nouveau,
		&backend_komeda, &backend_radeon,  &backend_synaptics, &backend_virtgpu,
		&backend_udl,	 &backend_vkms
	};

	for (i = 0; i < ARRAY_SIZE(backend_list); i++) {
		const struct backend *b = backend_list[i];
		if (!strcmp(drm_version->name, b->name)) {
			drmFreeVersion(drm_version);
			return b;
		}
	}

	drmFreeVersion(drm_version);
	return NULL;
}

struct driver *drv_create(int fd)
{
	struct driver *drv;
	int ret;

	drv = (struct driver *)calloc(1, sizeof(*drv));

	if (!drv)
		return NULL;

	char *minigbm_debug;
	minigbm_debug = getenv("MINIGBM_DEBUG");
	drv->compression = (minigbm_debug == NULL) || (strcmp(minigbm_debug, "nocompression") != 0);

	drv->fd = fd;
	drv->backend = drv_get_backend(fd);

	if (!drv->backend)
		goto free_driver;

	if (pthread_mutex_init(&drv->buffer_table_lock, NULL))
		goto free_driver;

	drv->buffer_table = drmHashCreate();
	if (!drv->buffer_table)
		goto free_buffer_table_lock;

	if (pthread_mutex_init(&drv->mappings_lock, NULL))
		goto free_buffer_table;

	drv->mappings = drv_array_init(sizeof(struct mapping));
	if (!drv->mappings)
		goto free_mappings_lock;

	drv->combos = drv_array_init(sizeof(struct combination));
	if (!drv->combos)
		goto free_mappings;

	if (drv->backend->init) {
		ret = drv->backend->init(drv);
		if (ret) {
			drv_array_destroy(drv->combos);
			goto free_mappings;
		}
	}

	return drv;

free_mappings:
	drv_array_destroy(drv->mappings);
free_mappings_lock:
	pthread_mutex_destroy(&drv->mappings_lock);
free_buffer_table:
	drmHashDestroy(drv->buffer_table);
free_buffer_table_lock:
	pthread_mutex_destroy(&drv->buffer_table_lock);
free_driver:
	free(drv);
	return NULL;
}
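
/*
 * Illustrative sketch of the drv_create()/drv_destroy() lifecycle. It assumes
 * the caller already owns a DRM render node fd; the device path and error
 * handling below are only examples.
 *
 *	int fd = open("/dev/dri/renderD128", O_RDWR | O_CLOEXEC);
 *	if (fd < 0)
 *		return -errno;
 *
 *	struct driver *drv = drv_create(fd);
 *	if (!drv) {
 *		close(fd);
 *		return -ENODEV;
 *	}
 *
 *	// ... allocate, map and export buffers through the drv_bo_* API ...
 *
 *	drv_destroy(drv);
 *	close(fd); // drv_destroy() does not close the fd passed to drv_create().
 */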

void drv_destroy(struct driver *drv)
{
	if (drv->backend->close)
		drv->backend->close(drv);

	drv_array_destroy(drv->combos);

	drv_array_destroy(drv->mappings);
	pthread_mutex_destroy(&drv->mappings_lock);

	drmHashDestroy(drv->buffer_table);
	pthread_mutex_destroy(&drv->buffer_table_lock);

	free(drv);
}

int drv_get_fd(struct driver *drv)
{
	return drv->fd;
}

const char *drv_get_name(struct driver *drv)
{
	return drv->backend->name;
}

struct combination *drv_get_combination(struct driver *drv, uint32_t format, uint64_t use_flags)
{
	struct combination *curr, *best;

	if (format == DRM_FORMAT_NONE || use_flags == BO_USE_NONE)
		return NULL;

	best = NULL;
	uint32_t i;
	for (i = 0; i < drv_array_size(drv->combos); i++) {
		curr = drv_array_at_idx(drv->combos, i);
		if ((format == curr->format) && use_flags == (curr->use_flags & use_flags))
			if (!best || best->metadata.priority < curr->metadata.priority)
				best = curr;
	}

	return best;
}
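
/*
 * Illustrative sketch of probing support before allocating: drv_get_combination()
 * returns NULL when no backend combination matches the format/use-flag pair.
 * DRM_FORMAT_XRGB8888 comes from drm_fourcc.h; BO_USE_SCANOUT is assumed to be
 * one of the BO_USE_* flags declared alongside this API.
 *
 *	if (!drv_get_combination(drv, DRM_FORMAT_XRGB8888, BO_USE_SCANOUT))
 *		return -EINVAL; // combination not supported by this driver
 */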

struct bo *drv_bo_new(struct driver *drv, uint32_t width, uint32_t height, uint32_t format,
		      uint64_t use_flags, bool is_test_buffer)
{
	struct bo *bo;
	bo = (struct bo *)calloc(1, sizeof(*bo));

	if (!bo)
		return NULL;

	bo->drv = drv;
	bo->meta.width = width;
	bo->meta.height = height;
	bo->meta.format = format;
	bo->meta.use_flags = use_flags;
	bo->meta.num_planes = drv_num_planes_from_format(format);
	bo->is_test_buffer = is_test_buffer;

	if (!bo->meta.num_planes) {
		free(bo);
		errno = EINVAL;
		return NULL;
	}

	return bo;
}

static void drv_bo_mapping_destroy(struct bo *bo)
{
	struct driver *drv = bo->drv;
	uint32_t idx = 0;

	/*
	 * This function is called right before the buffer is destroyed. It will free any mappings
	 * associated with the buffer.
	 */
	pthread_mutex_lock(&drv->mappings_lock);
	for (size_t plane = 0; plane < bo->meta.num_planes; plane++) {
		while (idx < drv_array_size(drv->mappings)) {
			struct mapping *mapping =
			    (struct mapping *)drv_array_at_idx(drv->mappings, idx);
			if (mapping->vma->handle != bo->handles[plane].u32) {
				idx++;
				continue;
			}

			if (!--mapping->vma->refcount) {
				int ret = drv->backend->bo_unmap(bo, mapping->vma);
				if (ret) {
					pthread_mutex_unlock(&drv->mappings_lock);
					drv_log("munmap failed\n");
					return;
				}

				free(mapping->vma);
			}

			/* This shrinks and shifts the array, so don't increment idx. */
			drv_array_remove(drv->mappings, idx);
		}
	}
	pthread_mutex_unlock(&drv->mappings_lock);
}

/*
 * Acquire a reference on plane buffers of the bo.
 */
static void drv_bo_acquire(struct bo *bo)
{
	struct driver *drv = bo->drv;

	pthread_mutex_lock(&drv->buffer_table_lock);
	for (size_t plane = 0; plane < bo->meta.num_planes; plane++) {
		uintptr_t num = 0;

		if (!drmHashLookup(drv->buffer_table, bo->handles[plane].u32, (void **)&num))
			drmHashDelete(drv->buffer_table, bo->handles[plane].u32);

		drmHashInsert(drv->buffer_table, bo->handles[plane].u32, (void *)(num + 1));
	}
	pthread_mutex_unlock(&drv->buffer_table_lock);
}

/*
 * Release a reference on plane buffers of the bo. Return true when the bo has lost all its
 * references. Otherwise, return false.
 */
static bool drv_bo_release(struct bo *bo)
{
	struct driver *drv = bo->drv;
	uintptr_t num;

	if (drv->backend->bo_release)
		drv->backend->bo_release(bo);

	pthread_mutex_lock(&drv->buffer_table_lock);
	for (size_t plane = 0; plane < bo->meta.num_planes; plane++) {
		if (!drmHashLookup(drv->buffer_table, bo->handles[plane].u32, (void **)&num)) {
			drmHashDelete(drv->buffer_table, bo->handles[plane].u32);

			if (num > 1) {
				drmHashInsert(drv->buffer_table, bo->handles[plane].u32,
					      (void *)(num - 1));
			}
		}
	}

	/* The same buffer can back multiple planes with different offsets. */
	for (size_t plane = 0; plane < bo->meta.num_planes; plane++) {
		if (!drmHashLookup(drv->buffer_table, bo->handles[plane].u32, (void **)&num)) {
			/* num is positive if found in the hashmap. */
			pthread_mutex_unlock(&drv->buffer_table_lock);
			return false;
		}
	}
	pthread_mutex_unlock(&drv->buffer_table_lock);

	return true;
}

struct bo *drv_bo_create(struct driver *drv, uint32_t width, uint32_t height, uint32_t format,
			 uint64_t use_flags)
{
	int ret;
	struct bo *bo;
	bool is_test_alloc;

	is_test_alloc = use_flags & BO_USE_TEST_ALLOC;
	use_flags &= ~BO_USE_TEST_ALLOC;

	bo = drv_bo_new(drv, width, height, format, use_flags, is_test_alloc);

	if (!bo)
		return NULL;

	ret = -EINVAL;
	if (drv->backend->bo_compute_metadata) {
		ret = drv->backend->bo_compute_metadata(bo, width, height, format, use_flags, NULL,
							0);
		if (!is_test_alloc && ret == 0)
			ret = drv->backend->bo_create_from_metadata(bo);
	} else if (!is_test_alloc) {
		ret = drv->backend->bo_create(bo, width, height, format, use_flags);
	}

	if (ret) {
		errno = -ret;
		free(bo);
		return NULL;
	}

	drv_bo_acquire(bo);

	return bo;
}
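
/*
 * Illustrative allocation sketch. DRM_FORMAT_XRGB8888 is a standard drm_fourcc.h
 * code; BO_USE_SCANOUT | BO_USE_SW_WRITE_OFTEN is assumed to be a valid BO_USE_*
 * combination for the backend in use.
 *
 *	struct bo *bo = drv_bo_create(drv, 1920, 1080, DRM_FORMAT_XRGB8888,
 *				      BO_USE_SCANOUT | BO_USE_SW_WRITE_OFTEN);
 *	if (!bo)
 *		return -errno; // drv_bo_create() sets errno on failure
 *
 *	// ... map, export or scan out the buffer ...
 *
 *	drv_bo_destroy(bo);
 */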

struct bo *drv_bo_create_with_modifiers(struct driver *drv, uint32_t width, uint32_t height,
					uint32_t format, const uint64_t *modifiers, uint32_t count)
{
	int ret;
	struct bo *bo;

	if (!drv->backend->bo_create_with_modifiers && !drv->backend->bo_compute_metadata) {
		errno = ENOENT;
		return NULL;
	}

	bo = drv_bo_new(drv, width, height, format, BO_USE_NONE, false);

	if (!bo)
		return NULL;

	ret = -EINVAL;
	if (drv->backend->bo_compute_metadata) {
		ret = drv->backend->bo_compute_metadata(bo, width, height, format, BO_USE_NONE,
							modifiers, count);
		if (ret == 0)
			ret = drv->backend->bo_create_from_metadata(bo);
	} else {
		ret = drv->backend->bo_create_with_modifiers(bo, width, height, format, modifiers,
							     count);
	}

	if (ret) {
		free(bo);
		return NULL;
	}

	drv_bo_acquire(bo);

	return bo;
}

void drv_bo_destroy(struct bo *bo)
{
	if (!bo->is_test_buffer && drv_bo_release(bo)) {
		drv_bo_mapping_destroy(bo);
		bo->drv->backend->bo_destroy(bo);
	}

	free(bo);
}

struct bo *drv_bo_import(struct driver *drv, struct drv_import_fd_data *data)
{
	int ret;
	size_t plane;
	struct bo *bo;
	off_t seek_end;

	bo = drv_bo_new(drv, data->width, data->height, data->format, data->use_flags, false);

	if (!bo)
		return NULL;

	ret = drv->backend->bo_import(bo, data);
	if (ret) {
		free(bo);
		return NULL;
	}

	drv_bo_acquire(bo);

	bo->meta.format_modifier = data->format_modifier;
	for (plane = 0; plane < bo->meta.num_planes; plane++) {
		bo->meta.strides[plane] = data->strides[plane];
		bo->meta.offsets[plane] = data->offsets[plane];

		seek_end = lseek(data->fds[plane], 0, SEEK_END);
		if (seek_end == (off_t)(-1)) {
			drv_log("lseek() failed with %s\n", strerror(errno));
			goto destroy_bo;
		}

		lseek(data->fds[plane], 0, SEEK_SET);
		if (plane == bo->meta.num_planes - 1 || data->offsets[plane + 1] == 0)
			bo->meta.sizes[plane] = seek_end - data->offsets[plane];
		else
			bo->meta.sizes[plane] = data->offsets[plane + 1] - data->offsets[plane];

		if ((int64_t)bo->meta.offsets[plane] + bo->meta.sizes[plane] > seek_end) {
			drv_log("buffer size is too large.\n");
			goto destroy_bo;
		}

		bo->meta.total_size += bo->meta.sizes[plane];
	}

	return bo;

destroy_bo:
	drv_bo_destroy(bo);
	return NULL;
}
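
/*
 * Illustrative import sketch for a single-plane dma-buf. Only the
 * drv_import_fd_data fields referenced above (width, height, format, use_flags,
 * fds, strides, offsets, format_modifier) are shown; the struct may carry more
 * fields, and the concrete values (including dmabuf_fd) are placeholders.
 *
 *	struct drv_import_fd_data data = { 0 };
 *	data.width = 1920;
 *	data.height = 1080;
 *	data.format = DRM_FORMAT_XRGB8888;
 *	data.use_flags = BO_USE_TEXTURE; // assumed BO_USE_* flag
 *	data.format_modifier = DRM_FORMAT_MOD_LINEAR;
 *	data.fds[0] = dmabuf_fd; // fd received from another driver or process
 *	data.strides[0] = 1920 * 4;
 *	data.offsets[0] = 0;
 *
 *	struct bo *bo = drv_bo_import(drv, &data);
 */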

void *drv_bo_map(struct bo *bo, const struct rectangle *rect, uint32_t map_flags,
		 struct mapping **map_data, size_t plane)
{
	struct driver *drv = bo->drv;
	uint32_t i;
	uint8_t *addr;
	struct mapping mapping = { 0 };

	assert(rect->width >= 0);
	assert(rect->height >= 0);
	assert(rect->x + rect->width <= drv_bo_get_width(bo));
	assert(rect->y + rect->height <= drv_bo_get_height(bo));
	assert(BO_MAP_READ_WRITE & map_flags);
	/* No CPU access for protected buffers. */
	assert(!(bo->meta.use_flags & BO_USE_PROTECTED));

	if (bo->is_test_buffer)
		return MAP_FAILED;

	mapping.rect = *rect;
	mapping.refcount = 1;

	pthread_mutex_lock(&drv->mappings_lock);

	for (i = 0; i < drv_array_size(drv->mappings); i++) {
		struct mapping *prior = (struct mapping *)drv_array_at_idx(drv->mappings, i);
		if (prior->vma->handle != bo->handles[plane].u32 ||
		    prior->vma->map_flags != map_flags)
			continue;

		if (rect->x != prior->rect.x || rect->y != prior->rect.y ||
		    rect->width != prior->rect.width || rect->height != prior->rect.height)
			continue;

		prior->refcount++;
		*map_data = prior;
		goto exact_match;
	}

	for (i = 0; i < drv_array_size(drv->mappings); i++) {
		struct mapping *prior = (struct mapping *)drv_array_at_idx(drv->mappings, i);
		if (prior->vma->handle != bo->handles[plane].u32 ||
		    prior->vma->map_flags != map_flags)
			continue;

		prior->vma->refcount++;
		mapping.vma = prior->vma;
		goto success;
	}

	mapping.vma = calloc(1, sizeof(*mapping.vma));
	if (!mapping.vma) {
		*map_data = NULL;
		pthread_mutex_unlock(&drv->mappings_lock);
		return MAP_FAILED;
	}

	memcpy(mapping.vma->map_strides, bo->meta.strides, sizeof(mapping.vma->map_strides));
	addr = drv->backend->bo_map(bo, mapping.vma, plane, map_flags);
	if (addr == MAP_FAILED) {
		*map_data = NULL;
		free(mapping.vma);
		pthread_mutex_unlock(&drv->mappings_lock);
		return MAP_FAILED;
	}

	mapping.vma->refcount = 1;
	mapping.vma->addr = addr;
	mapping.vma->handle = bo->handles[plane].u32;
	mapping.vma->map_flags = map_flags;

success:
	*map_data = drv_array_append(drv->mappings, &mapping);
exact_match:
	drv_bo_invalidate(bo, *map_data);
	addr = (uint8_t *)((*map_data)->vma->addr);
	addr += drv_bo_get_plane_offset(bo, plane);
	pthread_mutex_unlock(&drv->mappings_lock);
	return (void *)addr;
}

int drv_bo_unmap(struct bo *bo, struct mapping *mapping)
{
	struct driver *drv = bo->drv;
	uint32_t i;
	int ret = 0;

	pthread_mutex_lock(&drv->mappings_lock);

	if (--mapping->refcount)
		goto out;

	if (!--mapping->vma->refcount) {
		ret = drv->backend->bo_unmap(bo, mapping->vma);
		free(mapping->vma);
	}

	for (i = 0; i < drv_array_size(drv->mappings); i++) {
		if (mapping == (struct mapping *)drv_array_at_idx(drv->mappings, i)) {
			drv_array_remove(drv->mappings, i);
			break;
		}
	}

out:
	pthread_mutex_unlock(&drv->mappings_lock);
	return ret;
}

int drv_bo_invalidate(struct bo *bo, struct mapping *mapping)
{
	int ret = 0;

	assert(mapping);
	assert(mapping->vma);
	assert(mapping->refcount > 0);
	assert(mapping->vma->refcount > 0);

	if (bo->drv->backend->bo_invalidate)
		ret = bo->drv->backend->bo_invalidate(bo, mapping);

	return ret;
}

int drv_bo_flush(struct bo *bo, struct mapping *mapping)
{
	int ret = 0;

	assert(mapping);
	assert(mapping->vma);
	assert(mapping->refcount > 0);
	assert(mapping->vma->refcount > 0);

	if (bo->drv->backend->bo_flush)
		ret = bo->drv->backend->bo_flush(bo, mapping);

	return ret;
}

int drv_bo_flush_or_unmap(struct bo *bo, struct mapping *mapping)
{
	int ret = 0;

	assert(mapping);
	assert(mapping->vma);
	assert(mapping->refcount > 0);
	assert(mapping->vma->refcount > 0);
	assert(!(bo->meta.use_flags & BO_USE_PROTECTED));

	if (bo->drv->backend->bo_flush)
		ret = bo->drv->backend->bo_flush(bo, mapping);
	else
		ret = drv_bo_unmap(bo, mapping);

	return ret;
}

uint32_t drv_bo_get_width(struct bo *bo)
{
	return bo->meta.width;
}

uint32_t drv_bo_get_height(struct bo *bo)
{
	return bo->meta.height;
}

size_t drv_bo_get_num_planes(struct bo *bo)
{
	return bo->meta.num_planes;
}

union bo_handle drv_bo_get_plane_handle(struct bo *bo, size_t plane)
{
	return bo->handles[plane];
}

#ifndef DRM_RDWR
#define DRM_RDWR O_RDWR
#endif

int drv_bo_get_plane_fd(struct bo *bo, size_t plane)
{
	int ret, fd;
	assert(plane < bo->meta.num_planes);

	if (bo->is_test_buffer)
		return -EINVAL;

	ret = drmPrimeHandleToFD(bo->drv->fd, bo->handles[plane].u32, DRM_CLOEXEC | DRM_RDWR, &fd);

	// Older DRM implementations blocked DRM_RDWR, but gave a read/write mapping anyways
	if (ret)
		ret = drmPrimeHandleToFD(bo->drv->fd, bo->handles[plane].u32, DRM_CLOEXEC, &fd);

	if (ret)
		drv_log("Failed to get plane fd: %s\n", strerror(errno));

	return (ret) ? ret : fd;
}

uint32_t drv_bo_get_plane_offset(struct bo *bo, size_t plane)
{
	assert(plane < bo->meta.num_planes);
	return bo->meta.offsets[plane];
}

uint32_t drv_bo_get_plane_size(struct bo *bo, size_t plane)
{
	assert(plane < bo->meta.num_planes);
	return bo->meta.sizes[plane];
}

uint32_t drv_bo_get_plane_stride(struct bo *bo, size_t plane)
{
	assert(plane < bo->meta.num_planes);
	return bo->meta.strides[plane];
}

uint64_t drv_bo_get_format_modifier(struct bo *bo)
{
	return bo->meta.format_modifier;
}

uint32_t drv_bo_get_format(struct bo *bo)
{
	return bo->meta.format;
}

uint32_t drv_bo_get_tiling(struct bo *bo)
{
	return bo->meta.tiling;
}

uint64_t drv_bo_get_use_flags(struct bo *bo)
{
	return bo->meta.use_flags;
}

size_t drv_bo_get_total_size(struct bo *bo)
{
	return bo->meta.total_size;
}

/*
 * Map internal fourcc codes back to standard fourcc codes.
 */
uint32_t drv_get_standard_fourcc(uint32_t fourcc_internal)
{
	return (fourcc_internal == DRM_FORMAT_YVU420_ANDROID) ? DRM_FORMAT_YVU420 : fourcc_internal;
}

void drv_resolve_format_and_use_flags(struct driver *drv, uint32_t format, uint64_t use_flags,
				      uint32_t *out_format, uint64_t *out_use_flags)
{
	assert(drv->backend->resolve_format_and_use_flags);

	drv->backend->resolve_format_and_use_flags(drv, format, use_flags, out_format,
						   out_use_flags);
}

uint32_t drv_num_buffers_per_bo(struct bo *bo)
{
	uint32_t count = 0;
	size_t plane, p;

	if (bo->is_test_buffer)
		return 0;

	for (plane = 0; plane < bo->meta.num_planes; plane++) {
		for (p = 0; p < plane; p++)
			if (bo->handles[p].u32 == bo->handles[plane].u32)
				break;
		if (p == plane)
			count++;
	}

	return count;
}

void drv_log_prefix(const char *prefix, const char *file, int line, const char *format, ...)
{
	char buf[50];
	snprintf(buf, sizeof(buf), "[%s:%s(%d)]", prefix, basename(file), line);

	va_list args;
	va_start(args, format);
#ifdef __ANDROID__
	__android_log_vprint(ANDROID_LOG_ERROR, buf, format, args);
#else
	fprintf(stderr, "%s ", buf);
	vfprintf(stderr, format, args);
#endif
	va_end(args);
}

int drv_resource_info(struct bo *bo, uint32_t strides[DRV_MAX_PLANES],
		      uint32_t offsets[DRV_MAX_PLANES], uint64_t *format_modifier)
{
	for (uint32_t plane = 0; plane < bo->meta.num_planes; plane++) {
		strides[plane] = bo->meta.strides[plane];
		offsets[plane] = bo->meta.offsets[plane];
	}
	*format_modifier = bo->meta.format_modifier;

	if (bo->drv->backend->resource_info)
		return bo->drv->backend->resource_info(bo, strides, offsets, format_modifier);

	return 0;
}

uint32_t drv_get_max_texture_2d_size(struct driver *drv)
{
	if (drv->backend->get_max_texture_2d_size)
		return drv->backend->get_max_texture_2d_size(drv);

	return UINT32_MAX;
}