/*
 * Copyright 2021 The Chromium OS Authors. All rights reserved.
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include <errno.h>
#include <string.h>
#include <sys/mman.h>
#include <xf86drm.h>

#include "drv_priv.h"
#include "external/virtgpu_cross_domain_protocol.h"
#include "external/virtgpu_drm.h"
#include "helpers.h"
#include "util.h"
#include "virtgpu.h"

#define CAPSET_CROSS_DOMAIN 5
#define CAPSET_CROSS_FAKE 30

static const uint32_t scanout_render_formats[] = { DRM_FORMAT_ABGR2101010, DRM_FORMAT_ABGR8888,
                                                   DRM_FORMAT_ARGB2101010, DRM_FORMAT_ARGB8888,
                                                   DRM_FORMAT_RGB565, DRM_FORMAT_XBGR2101010,
                                                   DRM_FORMAT_XBGR8888, DRM_FORMAT_XRGB2101010,
                                                   DRM_FORMAT_XRGB8888 };

static const uint32_t render_formats[] = { DRM_FORMAT_ABGR16161616F };

static const uint32_t texture_only_formats[] = { DRM_FORMAT_R8, DRM_FORMAT_NV12, DRM_FORMAT_P010,
                                                 DRM_FORMAT_YVU420, DRM_FORMAT_YVU420_ANDROID };

extern struct virtgpu_param params[];

struct cross_domain_private {
        uint32_t ring_handle;
        void *ring_addr;
        struct drv_array *metadata_cache;
};

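/*
 * Tear down the per-driver cross-domain state: unmap the shared ring page,
 * close its GEM handle and free the metadata cache.
 */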
static void cross_domain_release_private(struct driver *drv)
{
        int ret;
        struct cross_domain_private *priv = drv->priv;
        struct drm_gem_close gem_close = { 0 };

        if (priv->ring_addr != MAP_FAILED)
                munmap(priv->ring_addr, PAGE_SIZE);

        if (priv->ring_handle) {
                gem_close.handle = priv->ring_handle;

                ret = drmIoctl(drv->fd, DRM_IOCTL_GEM_CLOSE, &gem_close);
                if (ret) {
                        drv_log("DRM_IOCTL_GEM_CLOSE failed (handle=%x) error %d\n",
                                priv->ring_handle, ret);
                }
        }

        drv_array_destroy(priv->metadata_cache);
        free(priv);
}

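/*
 * Advertise the format/use-flag combinations this backend supports. Only
 * linear layouts are reported here; the actual layout for a given allocation
 * comes back from the host through the metadata query.
 */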
static void add_combinations(struct driver *drv)
{
        struct format_metadata metadata;

        // Linear metadata always supported.
        metadata.tiling = 0;
        metadata.priority = 1;
        metadata.modifier = DRM_FORMAT_MOD_LINEAR;

        drv_add_combinations(drv, scanout_render_formats, ARRAY_SIZE(scanout_render_formats),
                             &metadata, BO_USE_RENDER_MASK | BO_USE_SCANOUT);

        drv_add_combinations(drv, render_formats, ARRAY_SIZE(render_formats), &metadata,
                             BO_USE_RENDER_MASK);

        drv_add_combinations(drv, texture_only_formats, ARRAY_SIZE(texture_only_formats), &metadata,
                             BO_USE_TEXTURE_MASK);

        /* Android CTS tests require this. */
        drv_add_combination(drv, DRM_FORMAT_BGR888, &metadata, BO_USE_SW_MASK);

        drv_modify_combination(drv, DRM_FORMAT_YVU420, &metadata, BO_USE_HW_VIDEO_ENCODER);
        drv_modify_combination(drv, DRM_FORMAT_NV12, &metadata,
                               BO_USE_HW_VIDEO_DECODER | BO_USE_SCANOUT | BO_USE_HW_VIDEO_ENCODER);

        /*
         * R8 format is used for Android's HAL_PIXEL_FORMAT_BLOB and is used for JPEG snapshots
         * from camera and input/output from hardware decoder/encoder.
         */
        drv_modify_combination(drv, DRM_FORMAT_R8, &metadata,
                               BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
                                   BO_USE_HW_VIDEO_ENCODER);

        drv_modify_linear_combinations(drv);
}

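/*
 * Submit a cross-domain command to the host and wait for the shared ring
 * buffer to go idle. When wait is true, the execbuffer is fenced against the
 * ring handle so the subsequent wait covers the host's response write.
 */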
static int cross_domain_submit_cmd(struct driver *drv, uint32_t *cmd, uint32_t cmd_size, bool wait)
{
        int ret;
        struct drm_virtgpu_3d_wait wait_3d = { 0 };
        struct drm_virtgpu_execbuffer exec = { 0 };
        struct cross_domain_private *priv = drv->priv;

        exec.command = (uint64_t)&cmd[0];
        exec.size = cmd_size;
        if (wait) {
                exec.flags = VIRTGPU_EXECBUF_FENCE_CONTEXT;
                exec.bo_handles = (uint64_t)&priv->ring_handle;
                exec.num_bo_handles = 1;
        }

        ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &exec);
        if (ret < 0) {
                drv_log("DRM_IOCTL_VIRTGPU_EXECBUFFER failed with %s\n", strerror(errno));
                return -EINVAL;
        }

        ret = -EAGAIN;
        while (ret == -EAGAIN) {
                wait_3d.handle = priv->ring_handle;
                ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_WAIT, &wait_3d);
        }

        if (ret < 0) {
                drv_log("DRM_IOCTL_VIRTGPU_WAIT failed with %s\n", strerror(errno));
                return ret;
        }

        return 0;
}

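/* Two metadata entries match if width, height, format and use flags all agree. */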
static bool metadata_equal(struct bo_metadata *current, struct bo_metadata *cached)
{
        if ((current->width == cached->width) && (current->height == cached->height) &&
            (current->format == cached->format) && (current->use_flags == cached->use_flags))
                return true;
        return false;
}

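/*
 * Fill in the allocation metadata (strides, offsets, sizes, modifier, blob id)
 * for the requested buffer. Results are served from a local cache when
 * possible; otherwise a CROSS_DOMAIN_CMD_GET_IMAGE_REQUIREMENTS command is
 * sent to the host and the response is read back from the shared ring.
 */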
static int cross_domain_metadata_query(struct driver *drv, struct bo_metadata *metadata)
{
        int ret = 0;
        struct bo_metadata *cached_data = NULL;
        struct cross_domain_private *priv = drv->priv;
        struct CrossDomainGetImageRequirements cmd_get_reqs;
        uint32_t *addr = (uint32_t *)priv->ring_addr;
        uint32_t plane, remaining_size;

        memset(&cmd_get_reqs, 0, sizeof(cmd_get_reqs));
        pthread_mutex_lock(&drv->driver_lock);
        for (uint32_t i = 0; i < drv_array_size(priv->metadata_cache); i++) {
                cached_data = (struct bo_metadata *)drv_array_at_idx(priv->metadata_cache, i);
                if (!metadata_equal(metadata, cached_data))
                        continue;

                memcpy(metadata, cached_data, sizeof(*cached_data));
                goto out_unlock;
        }

        cmd_get_reqs.hdr.cmd = CROSS_DOMAIN_CMD_GET_IMAGE_REQUIREMENTS;
        cmd_get_reqs.hdr.cmd_size = sizeof(struct CrossDomainGetImageRequirements);

        cmd_get_reqs.width = metadata->width;
        cmd_get_reqs.height = metadata->height;
        cmd_get_reqs.drm_format =
            (metadata->format == DRM_FORMAT_YVU420_ANDROID) ? DRM_FORMAT_YVU420 : metadata->format;
        cmd_get_reqs.flags = metadata->use_flags;

        /*
         * It is possible to avoid blocking other bo_create() calls by unlocking before
         * cross_domain_submit_cmd() and re-locking afterwards. However, that would require
         * another scan of the metadata cache before drv_array_append in case two bo_create() calls
         * do the same metadata query. Until cross_domain functionality is more widely tested,
         * leave this optimization out for now.
         */
        ret = cross_domain_submit_cmd(drv, (uint32_t *)&cmd_get_reqs, cmd_get_reqs.hdr.cmd_size,
                                      true);
        if (ret < 0)
                goto out_unlock;

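        /*
         * Parse the host's image requirements response out of the shared ring:
         * per-plane strides and offsets, the format modifier, total size, the
         * host blob id, and the map/memory indices.
         */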
        memcpy(&metadata->strides, &addr[0], 4 * sizeof(uint32_t));
        memcpy(&metadata->offsets, &addr[4], 4 * sizeof(uint32_t));
        memcpy(&metadata->format_modifier, &addr[8], sizeof(uint64_t));
        memcpy(&metadata->total_size, &addr[10], sizeof(uint64_t));
        memcpy(&metadata->blob_id, &addr[12], sizeof(uint64_t));

        metadata->map_info = addr[14];
        metadata->memory_idx = addr[16];
        metadata->physical_device_idx = addr[17];

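        /*
         * Derive per-plane sizes from the returned offsets: plane N's size is
         * taken from plane N+1's offset, and the final plane receives whatever
         * remains of total_size.
         */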
        remaining_size = metadata->total_size;
        for (plane = 0; plane < metadata->num_planes; plane++) {
                if (plane != 0) {
                        metadata->sizes[plane - 1] = metadata->offsets[plane];
                        remaining_size -= metadata->offsets[plane];
                }
        }

        metadata->sizes[plane - 1] = remaining_size;
        drv_array_append(priv->metadata_cache, metadata);

out_unlock:
        pthread_mutex_unlock(&drv->driver_lock);
        return ret;
}

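/*
 * Probe and initialize the cross-domain virtgpu context: check the capset,
 * create the context, allocate and map the shared ring buffer, and announce
 * it to the host with CROSS_DOMAIN_CMD_INIT.
 */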
static int cross_domain_init(struct driver *drv)
{
        int ret;
        struct cross_domain_private *priv;
        struct drm_virtgpu_map map = { 0 };
        struct drm_virtgpu_get_caps args = { 0 };
        struct drm_virtgpu_context_init init = { 0 };
        struct drm_virtgpu_resource_create_blob drm_rc_blob = { 0 };
        struct drm_virtgpu_context_set_param ctx_set_params[2] = { { 0 } };

        struct CrossDomainInit cmd_init;
        struct CrossDomainCapabilities cross_domain_caps;

        memset(&cmd_init, 0, sizeof(cmd_init));
        if (!params[param_context_init].value)
                return -ENOTSUP;

        if ((params[param_supported_capset_ids].value & (1 << CAPSET_CROSS_DOMAIN)) == 0)
                return -ENOTSUP;

        /*
         * crosvm never reports the fake capset. This is just an extra check to make sure we
         * don't use the cross-domain context by accident. Developers may remove this for
         * testing purposes.
         */
        if ((params[param_supported_capset_ids].value & (1 << CAPSET_CROSS_FAKE)) == 0)
                return -ENOTSUP;

        priv = calloc(1, sizeof(*priv));
        priv->metadata_cache = drv_array_init(sizeof(struct bo_metadata));
        priv->ring_addr = MAP_FAILED;
        drv->priv = priv;

        args.cap_set_id = CAPSET_CROSS_DOMAIN;
        args.size = sizeof(struct CrossDomainCapabilities);
        args.addr = (unsigned long long)&cross_domain_caps;

        ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_GET_CAPS, &args);
        if (ret) {
                drv_log("DRM_IOCTL_VIRTGPU_GET_CAPS failed with %s\n", strerror(errno));
                goto free_private;
        }

        // When 3D features are available, but the host does not support external memory, fall back
        // to the virgl minigbm backend. This typically means the guest side minigbm resource will
        // be backed by a host OpenGL texture.
        if (!cross_domain_caps.supports_external_gpu_memory && params[param_3d].value) {
                ret = -ENOTSUP;
                goto free_private;
        }

        // Initialize the cross-domain context. Create one fence context to wait for metadata
        // queries.
        ctx_set_params[0].param = VIRTGPU_CONTEXT_PARAM_CAPSET_ID;
        ctx_set_params[0].value = CAPSET_CROSS_DOMAIN;
        ctx_set_params[1].param = VIRTGPU_CONTEXT_PARAM_NUM_FENCE_CONTEXTS;
        ctx_set_params[1].value = 1;

        init.ctx_set_params = (unsigned long long)&ctx_set_params[0];
        init.num_params = 2;
        ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_CONTEXT_INIT, &init);
        if (ret) {
                drv_log("DRM_IOCTL_VIRTGPU_CONTEXT_INIT failed with %s\n", strerror(errno));
                goto free_private;
        }

        // Create a shared ring buffer to read metadata queries.
        drm_rc_blob.size = PAGE_SIZE;
        drm_rc_blob.blob_mem = VIRTGPU_BLOB_MEM_GUEST;
        drm_rc_blob.blob_flags = VIRTGPU_BLOB_FLAG_USE_MAPPABLE;

        ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB, &drm_rc_blob);
        if (ret < 0) {
                drv_log("DRM_VIRTGPU_RESOURCE_CREATE_BLOB failed with %s\n", strerror(errno));
                goto free_private;
        }

        priv->ring_handle = drm_rc_blob.bo_handle;

        // Map shared ring buffer.
        map.handle = priv->ring_handle;
        ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_MAP, &map);
        if (ret < 0) {
                drv_log("DRM_IOCTL_VIRTGPU_MAP failed with %s\n", strerror(errno));
                goto free_private;
        }

        priv->ring_addr =
            mmap(0, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, drv->fd, map.offset);

        if (priv->ring_addr == MAP_FAILED) {
                drv_log("mmap failed with %s\n", strerror(errno));
                ret = -errno;
                goto free_private;
        }

        // Notify host about ring buffer
        cmd_init.hdr.cmd = CROSS_DOMAIN_CMD_INIT;
        cmd_init.hdr.cmd_size = sizeof(struct CrossDomainInit);
        cmd_init.ring_id = drm_rc_blob.res_handle;
        ret = cross_domain_submit_cmd(drv, (uint32_t *)&cmd_init, cmd_init.hdr.cmd_size, false);
        if (ret < 0)
                goto free_private;

        // minigbm bookkeeping
        add_combinations(drv);
        return 0;

free_private:
        cross_domain_release_private(drv);
        return ret;
}

static void cross_domain_close(struct driver *drv)
{
        cross_domain_release_private(drv);
}

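/*
 * Allocate a buffer: query the host for the image layout, then create a
 * host-backed blob resource of that size and share one GEM handle across
 * all planes.
 */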
static int cross_domain_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
                                  uint64_t use_flags)
{
        int ret;
        uint32_t blob_flags = VIRTGPU_BLOB_FLAG_USE_SHAREABLE;
        struct drm_virtgpu_resource_create_blob drm_rc_blob = { 0 };

        ret = cross_domain_metadata_query(bo->drv, &bo->meta);
        if (ret < 0) {
336 drv_log("Metadata query failed");
                return ret;
        }

        if (use_flags & BO_USE_SW_MASK)
                blob_flags |= VIRTGPU_BLOB_FLAG_USE_MAPPABLE;

        if (params[param_cross_device].value && (use_flags & BO_USE_NON_GPU_HW))
                blob_flags |= VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE;

        drm_rc_blob.size = bo->meta.total_size;
        drm_rc_blob.blob_mem = VIRTGPU_BLOB_MEM_HOST3D;
        drm_rc_blob.blob_flags = blob_flags;
        drm_rc_blob.blob_id = bo->meta.blob_id;

        ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB, &drm_rc_blob);
        if (ret < 0) {
                drv_log("DRM_VIRTGPU_RESOURCE_CREATE_BLOB failed with %s\n", strerror(errno));
                return -errno;
        }

        for (uint32_t plane = 0; plane < bo->meta.num_planes; plane++)
                bo->handles[plane].u32 = drm_rc_blob.bo_handle;

        return 0;
}

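/*
 * Map the buffer into the guest: ask the kernel for the per-BO mmap offset
 * via DRM_IOCTL_VIRTGPU_MAP and mmap the whole resource in one mapping.
 */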
static void *cross_domain_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags)
{
        int ret;
        struct drm_virtgpu_map gem_map = { 0 };

        gem_map.handle = bo->handles[0].u32;
        ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_MAP, &gem_map);
        if (ret) {
                drv_log("DRM_IOCTL_VIRTGPU_MAP failed with %s\n", strerror(errno));
                return MAP_FAILED;
        }

        vma->length = bo->meta.total_size;
        return mmap(0, bo->meta.total_size, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd,
                    gem_map.offset);
}

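/* Backend entry points; import, destroy, unmap and format resolution reuse the generic drv_ helpers. */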
const struct backend virtgpu_cross_domain = {
        .name = "virtgpu_cross_domain",
        .init = cross_domain_init,
        .close = cross_domain_close,
        .bo_create = cross_domain_bo_create,
        .bo_import = drv_prime_bo_import,
        .bo_destroy = drv_gem_bo_destroy,
        .bo_map = cross_domain_bo_map,
        .bo_unmap = drv_bo_munmap,
        .resolve_format = drv_resolve_format_helper,
};