/*
 * Copyright 2021 The Chromium OS Authors. All rights reserved.
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include <errno.h>
#include <string.h>
#include <sys/mman.h>
#include <xf86drm.h>

#include "drv_helpers.h"
#include "drv_priv.h"
#include "external/virtgpu_cross_domain_protocol.h"
#include "external/virtgpu_drm.h"
#include "util.h"
#include "virtgpu.h"

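/*
 * Virtio-gpu capset IDs. CAPSET_CROSS_DOMAIN identifies the cross-domain
 * context type when the context is initialized below; CAPSET_CROSS_FAKE is
 * not referenced in this file.
 */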
#define CAPSET_CROSS_DOMAIN 5
#define CAPSET_CROSS_FAKE 30

static const uint32_t scanout_render_formats[] = { DRM_FORMAT_ABGR2101010, DRM_FORMAT_ABGR8888,
						   DRM_FORMAT_ARGB2101010, DRM_FORMAT_ARGB8888,
						   DRM_FORMAT_RGB565, DRM_FORMAT_XBGR2101010,
						   DRM_FORMAT_XBGR8888, DRM_FORMAT_XRGB2101010,
						   DRM_FORMAT_XRGB8888 };

static const uint32_t render_formats[] = { DRM_FORMAT_ABGR16161616F };

static const uint32_t texture_only_formats[] = { DRM_FORMAT_R8, DRM_FORMAT_NV12, DRM_FORMAT_P010,
						 DRM_FORMAT_YVU420, DRM_FORMAT_YVU420_ANDROID };

extern struct virtgpu_param params[];

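/*
 * Backend-private state: the GEM handle and guest mapping of the one-page
 * shared ring used for host replies, plus a cache of answered metadata
 * queries guarded by metadata_cache_lock.
 */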
struct cross_domain_private {
	uint32_t ring_handle;
	void *ring_addr;
	struct drv_array *metadata_cache;
	pthread_mutex_t metadata_cache_lock;
};

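/*
 * Release everything cross_domain_init() may have set up: the ring mapping,
 * the ring's GEM handle, the metadata cache, and the cache lock. Also used on
 * init error paths, so each teardown step checks whether its resource exists.
 */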
static void cross_domain_release_private(struct driver *drv)
{
	int ret;
	struct cross_domain_private *priv = drv->priv;
	struct drm_gem_close gem_close = { 0 };

	if (priv->ring_addr != MAP_FAILED)
		munmap(priv->ring_addr, PAGE_SIZE);

	if (priv->ring_handle) {
		gem_close.handle = priv->ring_handle;

		ret = drmIoctl(drv->fd, DRM_IOCTL_GEM_CLOSE, &gem_close);
		if (ret) {
			drv_log("DRM_IOCTL_GEM_CLOSE failed (handle=%x) error %d\n",
				priv->ring_handle, ret);
		}
	}

	if (priv->metadata_cache)
		drv_array_destroy(priv->metadata_cache);

	pthread_mutex_destroy(&priv->metadata_cache_lock);

	free(priv);
}

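/*
 * Advertise the format/use-flag combinations this backend supports. Only the
 * linear modifier is reported here; the concrete layout comes from the host
 * via the metadata query.
 */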
static void add_combinations(struct driver *drv)
{
	struct format_metadata metadata;

	// Linear metadata is always supported.
	metadata.tiling = 0;
	metadata.priority = 1;
	metadata.modifier = DRM_FORMAT_MOD_LINEAR;

	drv_add_combinations(drv, scanout_render_formats, ARRAY_SIZE(scanout_render_formats),
			     &metadata, BO_USE_RENDER_MASK | BO_USE_SCANOUT);

	drv_add_combinations(drv, render_formats, ARRAY_SIZE(render_formats), &metadata,
			     BO_USE_RENDER_MASK);

	drv_add_combinations(drv, texture_only_formats, ARRAY_SIZE(texture_only_formats),
			     &metadata, BO_USE_TEXTURE_MASK);

	/* Android CTS tests require this. */
	drv_add_combination(drv, DRM_FORMAT_BGR888, &metadata, BO_USE_SW_MASK);

	drv_modify_combination(drv, DRM_FORMAT_YVU420, &metadata, BO_USE_HW_VIDEO_ENCODER);
	drv_modify_combination(drv, DRM_FORMAT_NV12, &metadata,
			       BO_USE_HW_VIDEO_DECODER | BO_USE_SCANOUT | BO_USE_HW_VIDEO_ENCODER);

	/*
	 * The R8 format backs Android's HAL_PIXEL_FORMAT_BLOB and is used for JPEG snapshots
	 * from the camera as well as input/output buffers for the hardware decoder/encoder.
	 */
	drv_modify_combination(drv, DRM_FORMAT_R8, &metadata,
			       BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
				   BO_USE_HW_VIDEO_ENCODER);

	drv_modify_linear_combinations(drv);
}

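/*
 * Submit a cross-domain command to the host. When a reply is expected, the
 * submission is tagged with the ring's fence context and we block with
 * DRM_IOCTL_VIRTGPU_WAIT on the ring handle until the host has written its
 * response into the shared ring page.
 */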
static int cross_domain_submit_cmd(struct driver *drv, uint32_t *cmd, uint32_t cmd_size, bool wait)
{
	int ret;
	struct drm_virtgpu_3d_wait wait_3d = { 0 };
	struct drm_virtgpu_execbuffer exec = { 0 };
	struct cross_domain_private *priv = drv->priv;

	exec.command = (uint64_t)&cmd[0];
	exec.size = cmd_size;
	if (wait) {
		exec.flags = VIRTGPU_EXECBUF_RING_IDX;
		exec.bo_handles = (uint64_t)&priv->ring_handle;
		exec.num_bo_handles = 1;
	}

	ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &exec);
	if (ret < 0) {
		drv_log("DRM_IOCTL_VIRTGPU_EXECBUFFER failed with %s\n", strerror(errno));
		return -EINVAL;
	}

	ret = -EAGAIN;
	while (ret == -EAGAIN) {
		wait_3d.handle = priv->ring_handle;
		ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_WAIT, &wait_3d);
	}

	if (ret < 0) {
		drv_log("DRM_IOCTL_VIRTGPU_WAIT failed with %s\n", strerror(errno));
		return ret;
	}

	return 0;
}

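/* A cached result is reusable only if every field influencing the host's layout choice matches. */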
static bool metadata_equal(struct bo_metadata *current, struct bo_metadata *cached)
{
	if ((current->width == cached->width) && (current->height == cached->height) &&
	    (current->format == cached->format) && (current->use_flags == cached->use_flags))
		return true;
	return false;
}

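/*
 * Ask the host for the allocation requirements (strides, offsets, modifier,
 * total size) of a prospective buffer. Results are cached under
 * metadata_cache_lock, so repeated allocations with identical parameters skip
 * the host round trip.
 */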
static int cross_domain_metadata_query(struct driver *drv, struct bo_metadata *metadata)
{
	int ret = 0;
	struct bo_metadata *cached_data = NULL;
	struct cross_domain_private *priv = drv->priv;
	struct CrossDomainGetImageRequirements cmd_get_reqs;
	uint32_t *addr = (uint32_t *)priv->ring_addr;
	uint32_t plane, remaining_size;

	memset(&cmd_get_reqs, 0, sizeof(cmd_get_reqs));
	pthread_mutex_lock(&priv->metadata_cache_lock);
	for (uint32_t i = 0; i < drv_array_size(priv->metadata_cache); i++) {
		cached_data = (struct bo_metadata *)drv_array_at_idx(priv->metadata_cache, i);
		if (!metadata_equal(metadata, cached_data))
			continue;

		memcpy(metadata, cached_data, sizeof(*cached_data));
		goto out_unlock;
	}

	cmd_get_reqs.hdr.cmd = CROSS_DOMAIN_CMD_GET_IMAGE_REQUIREMENTS;
	cmd_get_reqs.hdr.cmd_size = sizeof(struct CrossDomainGetImageRequirements);

	cmd_get_reqs.width = metadata->width;
	cmd_get_reqs.height = metadata->height;
	cmd_get_reqs.drm_format =
	    (metadata->format == DRM_FORMAT_YVU420_ANDROID) ? DRM_FORMAT_YVU420 : metadata->format;
	cmd_get_reqs.flags = metadata->use_flags;

	/*
	 * It is possible to avoid blocking other bo_create() calls by unlocking before
	 * cross_domain_submit_cmd() and re-locking afterwards. However, that would require
	 * another scan of the metadata cache before drv_array_append() in case two bo_create()
	 * calls perform the same metadata query. Until the cross_domain functionality is more
	 * widely tested, leave this optimization out for now.
	 */
	ret = cross_domain_submit_cmd(drv, (uint32_t *)&cmd_get_reqs, cmd_get_reqs.hdr.cmd_size,
				      true);
	if (ret < 0)
		goto out_unlock;

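	/*
	 * Reply layout in the shared ring page, as consumed below: words 0-3
	 * hold the per-plane strides, words 4-7 the per-plane offsets, words
	 * 8-9 the 64-bit format modifier, words 10-11 the 64-bit total size,
	 * word 12 the blob_id, and words 13-15 map_info, memory_idx and
	 * physical_device_idx.
	 */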
	memcpy(&metadata->strides, &addr[0], 4 * sizeof(uint32_t));
	memcpy(&metadata->offsets, &addr[4], 4 * sizeof(uint32_t));
	memcpy(&metadata->format_modifier, &addr[8], sizeof(uint64_t));
	memcpy(&metadata->total_size, &addr[10], sizeof(uint64_t));
	memcpy(&metadata->blob_id, &addr[12], sizeof(uint32_t));

	metadata->map_info = addr[13];
	metadata->memory_idx = addr[14];
	metadata->physical_device_idx = addr[15];

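	/*
	 * Derive per-plane sizes: each plane's size is taken from the next
	 * plane's offset, and the final plane receives whatever remains of
	 * total_size.
	 */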
	remaining_size = metadata->total_size;
	for (plane = 0; plane < metadata->num_planes; plane++) {
		if (plane != 0) {
			metadata->sizes[plane - 1] = metadata->offsets[plane];
			remaining_size -= metadata->offsets[plane];
		}
	}

	metadata->sizes[plane - 1] = remaining_size;
	drv_array_append(priv->metadata_cache, metadata);

out_unlock:
	pthread_mutex_unlock(&priv->metadata_cache_lock);
	return ret;
}

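/*
 * Initialize the cross-domain context: verify the required virtgpu params and
 * capset, create a context with one fence ring, allocate and map the shared
 * ring page, then tell the host about it with CROSS_DOMAIN_CMD_INIT.
 */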
static int cross_domain_init(struct driver *drv)
{
	int ret;
	struct cross_domain_private *priv;
	struct drm_virtgpu_map map = { 0 };
	struct drm_virtgpu_get_caps args = { 0 };
	struct drm_virtgpu_context_init init = { 0 };
	struct drm_virtgpu_resource_create_blob drm_rc_blob = { 0 };
	struct drm_virtgpu_context_set_param ctx_set_params[2] = { { 0 } };

	struct CrossDomainInit cmd_init;
	struct CrossDomainCapabilities cross_domain_caps;

	memset(&cmd_init, 0, sizeof(cmd_init));
	if (!params[param_context_init].value)
		return -ENOTSUP;

	if ((params[param_supported_capset_ids].value & (1 << CAPSET_CROSS_DOMAIN)) == 0)
		return -ENOTSUP;

	if (!params[param_resource_blob].value)
		return -ENOTSUP;

	// Need zero-copy memory.
	if (!params[param_host_visible].value && !params[param_create_guest_handle].value)
		return -ENOTSUP;

	priv = calloc(1, sizeof(*priv));
	if (!priv)
		return -ENOMEM;

	ret = pthread_mutex_init(&priv->metadata_cache_lock, NULL);
	if (ret) {
		free(priv);
		return ret;
	}

	// Set these before any jump to free_private, so cross_domain_release_private() never
	// sees a NULL drv->priv or tries to munmap() an address that was never mapped.
	priv->ring_addr = MAP_FAILED;
	drv->priv = priv;

	priv->metadata_cache = drv_array_init(sizeof(struct bo_metadata));
	if (!priv->metadata_cache) {
		ret = -ENOMEM;
		goto free_private;
	}

	args.cap_set_id = CAPSET_CROSS_DOMAIN;
	args.size = sizeof(struct CrossDomainCapabilities);
	args.addr = (unsigned long long)&cross_domain_caps;

	ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_GET_CAPS, &args);
	if (ret) {
		drv_log("DRM_IOCTL_VIRTGPU_GET_CAPS failed with %s\n", strerror(errno));
		goto free_private;
	}

	// When 3D features are available but the host does not support external memory, fall
	// back to the virgl minigbm backend. This typically means the guest-side minigbm
	// resource will be backed by a host OpenGL texture.
	if (!cross_domain_caps.supports_external_gpu_memory && params[param_3d].value) {
		ret = -ENOTSUP;
		goto free_private;
	}

	// Initialize the cross-domain context. Create one fence context to wait for metadata
	// queries.
	ctx_set_params[0].param = VIRTGPU_CONTEXT_PARAM_CAPSET_ID;
	ctx_set_params[0].value = CAPSET_CROSS_DOMAIN;
	ctx_set_params[1].param = VIRTGPU_CONTEXT_PARAM_NUM_RINGS;
	ctx_set_params[1].value = 1;

	init.ctx_set_params = (unsigned long long)&ctx_set_params[0];
	init.num_params = 2;
	ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_CONTEXT_INIT, &init);
	if (ret) {
		drv_log("DRM_IOCTL_VIRTGPU_CONTEXT_INIT failed with %s\n", strerror(errno));
		goto free_private;
	}

	// Create a shared ring buffer to read metadata queries.
	drm_rc_blob.size = PAGE_SIZE;
	drm_rc_blob.blob_mem = VIRTGPU_BLOB_MEM_GUEST;
	drm_rc_blob.blob_flags = VIRTGPU_BLOB_FLAG_USE_MAPPABLE;

	ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB, &drm_rc_blob);
	if (ret < 0) {
		drv_log("DRM_VIRTGPU_RESOURCE_CREATE_BLOB failed with %s\n", strerror(errno));
		goto free_private;
	}

	priv->ring_handle = drm_rc_blob.bo_handle;

	// Map the shared ring buffer.
	map.handle = priv->ring_handle;
	ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_MAP, &map);
	if (ret < 0) {
		drv_log("DRM_IOCTL_VIRTGPU_MAP failed with %s\n", strerror(errno));
		goto free_private;
	}

	priv->ring_addr =
	    mmap(0, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, drv->fd, map.offset);

	if (priv->ring_addr == MAP_FAILED) {
		drv_log("mmap failed with %s\n", strerror(errno));
		ret = -ENOMEM;
		goto free_private;
	}

	// Notify the host about the ring buffer.
	cmd_init.hdr.cmd = CROSS_DOMAIN_CMD_INIT;
	cmd_init.hdr.cmd_size = sizeof(struct CrossDomainInit);
	cmd_init.ring_id = drm_rc_blob.res_handle;
	ret = cross_domain_submit_cmd(drv, (uint32_t *)&cmd_init, cmd_init.hdr.cmd_size, false);
	if (ret < 0)
		goto free_private;

	// minigbm bookkeeping.
	add_combinations(drv);
	return 0;

free_private:
	cross_domain_release_private(drv);
	return ret;
}

static void cross_domain_close(struct driver *drv)
{
	cross_domain_release_private(drv);
}

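/*
 * Create a buffer: query the host for its layout, then instantiate a blob
 * resource of the required total size. Every plane shares the single GEM
 * handle of that blob.
 */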
static int cross_domain_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
				  uint64_t use_flags)
{
	int ret;
	uint32_t blob_flags = VIRTGPU_BLOB_FLAG_USE_SHAREABLE;
	struct drm_virtgpu_resource_create_blob drm_rc_blob = { 0 };

	ret = cross_domain_metadata_query(bo->drv, &bo->meta);
	if (ret < 0) {
		drv_log("Metadata query failed\n");
		return ret;
	}

	if (use_flags & BO_USE_SW_MASK)
		blob_flags |= VIRTGPU_BLOB_FLAG_USE_MAPPABLE;

	if (params[param_cross_device].value)
		blob_flags |= VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE;

	// It may be possible to have host3d blobs and handles from guest memory at the same
	// time, but the immediate use cases need only one or the other. For now, prefer guest
	// memory: enabling it is more involved (it requires the --udmabuf flag to crosvm), so a
	// developer who enabled it most likely intends to test it.
	if (params[param_create_guest_handle].value) {
		drm_rc_blob.blob_mem = VIRTGPU_BLOB_MEM_GUEST;
		blob_flags |= VIRTGPU_BLOB_FLAG_CREATE_GUEST_HANDLE;
	} else if (params[param_host_visible].value) {
		drm_rc_blob.blob_mem = VIRTGPU_BLOB_MEM_HOST3D;
	}

	drm_rc_blob.size = bo->meta.total_size;
	drm_rc_blob.blob_flags = blob_flags;
	drm_rc_blob.blob_id = (uint64_t)bo->meta.blob_id;

	ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB, &drm_rc_blob);
	if (ret < 0) {
		drv_log("DRM_VIRTGPU_RESOURCE_CREATE_BLOB failed with %s\n", strerror(errno));
		return -errno;
	}

	for (uint32_t plane = 0; plane < bo->meta.num_planes; plane++)
		bo->handles[plane].u32 = drm_rc_blob.bo_handle;

	return 0;
}

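/*
 * DRM_IOCTL_VIRTGPU_MAP returns the fake offset at which the blob can be
 * mmap()ed through the DRM fd; the whole buffer is mapped in one vma.
 */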
static void *cross_domain_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags)
{
	int ret;
	struct drm_virtgpu_map gem_map = { 0 };

	gem_map.handle = bo->handles[0].u32;
	ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_MAP, &gem_map);
	if (ret) {
		drv_log("DRM_IOCTL_VIRTGPU_MAP failed with %s\n", strerror(errno));
		return MAP_FAILED;
	}

	vma->length = bo->meta.total_size;
	return mmap(0, bo->meta.total_size, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd,
		    gem_map.offset);
}

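/*
 * Import, destroy, unmap, and format resolution reuse the generic helpers;
 * only creation and mapping need cross-domain-specific logic.
 */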
const struct backend virtgpu_cross_domain = {
	.name = "virtgpu_cross_domain",
	.init = cross_domain_init,
	.close = cross_domain_close,
	.bo_create = cross_domain_bo_create,
	.bo_import = drv_prime_bo_import,
	.bo_destroy = drv_gem_bo_destroy,
	.bo_map = cross_domain_bo_map,
	.bo_unmap = drv_bo_munmap,
	.resolve_format_and_use_flags = drv_resolve_format_and_use_flags_helper,
};