/*
 * Copyright 2017 The Chromium OS Authors. All rights reserved.
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <xf86drm.h>

#include "drv_priv.h"
#include "helpers.h"
#include "util.h"
#include "virgl_hw.h"
#include "virtgpu_drm.h"

#ifndef PAGE_SIZE
#define PAGE_SIZE 0x1000
#endif
#define PIPE_TEXTURE_2D 2

#define MESA_LLVMPIPE_TILE_ORDER 6
#define MESA_LLVMPIPE_TILE_SIZE (1 << MESA_LLVMPIPE_TILE_ORDER)

struct feature {
	uint64_t feature;
	const char *name;
	uint32_t enabled;
};

enum feature_id {
	feat_3d,
	feat_capset_fix,
	feat_max,
};

#define FEATURE(x)                                                                                 \
	(struct feature)                                                                           \
	{                                                                                          \
		x, #x, 0                                                                           \
	}
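
/*
 * Illustrative note (not in the original source): FEATURE(x) combines a C99
 * compound literal with the stringizing operator, so
 *
 *	FEATURE(VIRTGPU_PARAM_3D_FEATURES)
 *
 * expands to
 *
 *	(struct feature) { VIRTGPU_PARAM_3D_FEATURES, "VIRTGPU_PARAM_3D_FEATURES", 0 }
 *
 * i.e. an entry whose name mirrors its parameter token and which starts out
 * disabled until probed via DRM_IOCTL_VIRTGPU_GETPARAM in virtio_gpu_init().
 */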

static struct feature features[] = { FEATURE(VIRTGPU_PARAM_3D_FEATURES),
				     FEATURE(VIRTGPU_PARAM_CAPSET_QUERY_FIX) };

static const uint32_t render_target_formats[] = { DRM_FORMAT_ABGR8888, DRM_FORMAT_ARGB8888,
						  DRM_FORMAT_RGB565, DRM_FORMAT_XBGR8888,
						  DRM_FORMAT_XRGB8888 };

static const uint32_t dumb_texture_source_formats[] = { DRM_FORMAT_R8, DRM_FORMAT_YVU420,
							DRM_FORMAT_NV12,
							DRM_FORMAT_YVU420_ANDROID };

static const uint32_t texture_source_formats[] = { DRM_FORMAT_NV12, DRM_FORMAT_R8, DRM_FORMAT_RG88,
						   DRM_FORMAT_YVU420_ANDROID };

struct virtio_gpu_priv {
	int caps_is_v2;
	union virgl_caps caps;
};

static uint32_t translate_format(uint32_t drm_fourcc)
{
	switch (drm_fourcc) {
	case DRM_FORMAT_XRGB8888:
		return VIRGL_FORMAT_B8G8R8X8_UNORM;
	case DRM_FORMAT_ARGB8888:
		return VIRGL_FORMAT_B8G8R8A8_UNORM;
	case DRM_FORMAT_XBGR8888:
		return VIRGL_FORMAT_R8G8B8X8_UNORM;
	case DRM_FORMAT_ABGR8888:
		return VIRGL_FORMAT_R8G8B8A8_UNORM;
	case DRM_FORMAT_RGB565:
		return VIRGL_FORMAT_B5G6R5_UNORM;
	case DRM_FORMAT_R8:
		return VIRGL_FORMAT_R8_UNORM;
	case DRM_FORMAT_RG88:
		return VIRGL_FORMAT_R8G8_UNORM;
	case DRM_FORMAT_NV12:
		return VIRGL_FORMAT_NV12;
	case DRM_FORMAT_YVU420:
	case DRM_FORMAT_YVU420_ANDROID:
		return VIRGL_FORMAT_YV12;
	default:
		return 0;
	}
}

static bool virtio_gpu_supports_format(struct virgl_supported_format_mask *supported,
				       uint32_t drm_format)
{
	uint32_t virgl_format = translate_format(drm_format);
	if (!virgl_format) {
		return false;
	}

	uint32_t bitmask_index = virgl_format / 32;
	uint32_t bit_index = virgl_format % 32;
	return supported->bitmask[bitmask_index] & (1 << bit_index);
}
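
/*
 * Illustrative note (not in the original source): the caps structs report
 * supported virgl formats as a flat bit array, so a format whose enum value
 * is v occupies bit (v % 32) of 32-bit word (v / 32). A hypothetical format
 * with value 67 would therefore be tested as bit 3 of supported->bitmask[2].
 */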

// Adds the given buffer combination to the list of supported buffer combinations if the
// combination is supported by the virtio backend.
static void virtio_gpu_add_combination(struct driver *drv, uint32_t drm_format,
				       struct format_metadata *metadata, uint64_t use_flags)
{
	struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)drv->priv;

	if (features[feat_3d].enabled && priv->caps.max_version >= 1) {
		if ((use_flags & BO_USE_RENDERING) &&
		    !virtio_gpu_supports_format(&priv->caps.v1.render, drm_format)) {
			drv_log("Skipping unsupported render format: %d\n", drm_format);
			return;
		}

		if ((use_flags & BO_USE_TEXTURE) &&
		    !virtio_gpu_supports_format(&priv->caps.v1.sampler, drm_format)) {
			drv_log("Skipping unsupported texture format: %d\n", drm_format);
			return;
		}
		if ((use_flags & BO_USE_SCANOUT) && priv->caps_is_v2 &&
		    !virtio_gpu_supports_format(&priv->caps.v2.scanout, drm_format)) {
			drv_log("Unsupported scanout format: %d\n", drm_format);
			use_flags &= ~BO_USE_SCANOUT;
		}
	}

	drv_add_combination(drv, drm_format, metadata, use_flags);
}

// Adds each given buffer combination to the list of supported buffer combinations if the
// combination is supported by the virtio backend.
static void virtio_gpu_add_combinations(struct driver *drv, const uint32_t *drm_formats,
					uint32_t num_formats, struct format_metadata *metadata,
					uint64_t use_flags)
{
	uint32_t i;

	for (i = 0; i < num_formats; i++) {
		virtio_gpu_add_combination(drv, drm_formats[i], metadata, use_flags);
	}
}

static int virtio_dumb_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
				 uint64_t use_flags)
{
	if (bo->meta.format != DRM_FORMAT_R8) {
		width = ALIGN(width, MESA_LLVMPIPE_TILE_SIZE);
		height = ALIGN(height, MESA_LLVMPIPE_TILE_SIZE);
	}

	return drv_dumb_bo_create_ex(bo, width, height, format, use_flags, BO_QUIRK_DUMB32BPP);
}
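
/*
 * Illustrative note (not in the original source): with
 * MESA_LLVMPIPE_TILE_SIZE = 64, a 1366x768 request is padded to 1408x768,
 * since ALIGN() rounds each dimension up to the next multiple of 64 so the
 * host's llvmpipe renderer can address whole tiles; DRM_FORMAT_R8 buffers
 * are deliberately left unpadded by the check above.
 */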

static inline void handle_flag(uint64_t *flag, uint64_t check_flag, uint32_t *bind,
			       uint32_t virgl_bind)
{
	if ((*flag) & check_flag) {
		(*flag) &= ~check_flag;
		(*bind) |= virgl_bind;
	}
}

static uint32_t use_flags_to_bind(uint64_t use_flags)
{
	/* In crosvm, VIRGL_BIND_SHARED means minigbm will allocate, not virglrenderer. */
	uint32_t bind = VIRGL_BIND_SHARED;

	handle_flag(&use_flags, BO_USE_TEXTURE, &bind, VIRGL_BIND_SAMPLER_VIEW);
	handle_flag(&use_flags, BO_USE_RENDERING, &bind, VIRGL_BIND_RENDER_TARGET);
	handle_flag(&use_flags, BO_USE_SCANOUT, &bind, VIRGL_BIND_SCANOUT);
	handle_flag(&use_flags, BO_USE_CURSOR, &bind, VIRGL_BIND_CURSOR);
	handle_flag(&use_flags, BO_USE_LINEAR, &bind, VIRGL_BIND_LINEAR);

	handle_flag(&use_flags, BO_USE_SW_READ_OFTEN, &bind, VIRGL_BIND_LINEAR);
	handle_flag(&use_flags, BO_USE_SW_READ_RARELY, &bind, VIRGL_BIND_LINEAR);
	handle_flag(&use_flags, BO_USE_SW_WRITE_OFTEN, &bind, VIRGL_BIND_LINEAR);
	handle_flag(&use_flags, BO_USE_SW_WRITE_RARELY, &bind, VIRGL_BIND_LINEAR);

	// All host drivers only support linear camera buffer formats. If
	// that changes, this will need to be modified.
	handle_flag(&use_flags, BO_USE_CAMERA_READ, &bind, VIRGL_BIND_LINEAR);
	handle_flag(&use_flags, BO_USE_CAMERA_WRITE, &bind, VIRGL_BIND_LINEAR);

	if (use_flags) {
		drv_log("Unhandled bo use flag: %llx\n", (unsigned long long)use_flags);
	}

	return bind;
}
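
/*
 * Illustrative note (not in the original source): handle_flag() translates
 * and consumes bits at the same time, so, for example,
 *
 *	use_flags_to_bind(BO_USE_TEXTURE | BO_USE_SCANOUT)
 *
 * returns VIRGL_BIND_SHARED | VIRGL_BIND_SAMPLER_VIEW | VIRGL_BIND_SCANOUT,
 * and any bit still set in use_flags afterwards is one the table above does
 * not know about, which is exactly what the "Unhandled bo use flag" log
 * reports.
 */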

static int virtio_virgl_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
				  uint64_t use_flags)
{
	int ret;
	uint32_t stride;
	struct drm_virtgpu_resource_create res_create;

	stride = drv_stride_from_format(format, width, 0);
	drv_bo_from_format(bo, stride, height, format);

	/*
	 * Setting the target is intended to ensure this resource gets bound as a 2D
	 * texture in the host renderer's GL state. All of these resource properties are
	 * sent unchanged by the kernel to the host, which in turn sends them unchanged to
	 * virglrenderer. When virglrenderer makes a resource, it will convert the target
	 * enum to the equivalent one in GL and then bind the resource to that target.
	 */
	memset(&res_create, 0, sizeof(res_create));

	res_create.target = PIPE_TEXTURE_2D;
	res_create.format = translate_format(format);
	res_create.bind = use_flags_to_bind(use_flags);
	res_create.width = width;
	res_create.height = height;

	/* For virgl 3D */
	res_create.depth = 1;
	res_create.array_size = 1;
	res_create.last_level = 0;
	res_create.nr_samples = 0;

	res_create.size = ALIGN(bo->meta.total_size, PAGE_SIZE); // PAGE_SIZE = 0x1000
	ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE, &res_create);
	if (ret) {
		drv_log("DRM_IOCTL_VIRTGPU_RESOURCE_CREATE failed with %s\n", strerror(errno));
		return ret;
	}

	for (uint32_t plane = 0; plane < bo->meta.num_planes; plane++)
		bo->handles[plane].u32 = res_create.bo_handle;

	return 0;
}

static void *virtio_virgl_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags)
{
	int ret;
	struct drm_virtgpu_map gem_map;

	memset(&gem_map, 0, sizeof(gem_map));
	gem_map.handle = bo->handles[0].u32;

	ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_MAP, &gem_map);
	if (ret) {
		drv_log("DRM_IOCTL_VIRTGPU_MAP failed with %s\n", strerror(errno));
		return MAP_FAILED;
	}

	vma->length = bo->meta.total_size;
	return mmap(0, bo->meta.total_size, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd,
		    gem_map.offset);
}

static int virtio_gpu_get_caps(struct driver *drv, union virgl_caps *caps, int *caps_is_v2)
{
	int ret;
	struct drm_virtgpu_get_caps cap_args;

	*caps_is_v2 = 0;
	memset(&cap_args, 0, sizeof(cap_args));
	cap_args.addr = (unsigned long long)caps;
	if (features[feat_capset_fix].enabled) {
		*caps_is_v2 = 1;
		cap_args.cap_set_id = 2;
		cap_args.size = sizeof(union virgl_caps);
	} else {
		cap_args.cap_set_id = 1;
		cap_args.size = sizeof(struct virgl_caps_v1);
	}

	ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_GET_CAPS, &cap_args);
	if (ret) {
		drv_log("DRM_IOCTL_VIRTGPU_GET_CAPS failed with %s\n", strerror(errno));
		*caps_is_v2 = 0;

		// Fallback to v1
		cap_args.cap_set_id = 1;
		cap_args.size = sizeof(struct virgl_caps_v1);

		ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_GET_CAPS, &cap_args);
		if (ret) {
			drv_log("DRM_IOCTL_VIRTGPU_GET_CAPS failed with %s\n", strerror(errno));
		}
	}

	return ret;
}
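
/*
 * Illustrative note (not in the original source): struct virgl_caps_v2
 * embeds struct virgl_caps_v1, so falling back to cap set 1 only loses the
 * v2-only fields (such as the scanout format mask consulted in
 * virtio_gpu_add_combination); caps_is_v2 records which layout the union
 * actually holds.
 */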

static int virtio_gpu_init(struct driver *drv)
{
	int ret;
	struct virtio_gpu_priv *priv;

	priv = calloc(1, sizeof(*priv));
	drv->priv = priv;
	for (uint32_t i = 0; i < ARRAY_SIZE(features); i++) {
		struct drm_virtgpu_getparam params = { 0 };

		params.param = features[i].feature;
		params.value = (uint64_t)(uintptr_t)&features[i].enabled;
		ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_GETPARAM, &params);
		if (ret)
			drv_log("DRM_IOCTL_VIRTGPU_GETPARAM failed with %s\n", strerror(errno));
	}

	if (features[feat_3d].enabled) {
		virtio_gpu_get_caps(drv, &priv->caps, &priv->caps_is_v2);

		/* This doesn't mean the host can scan out everything; it just means the
		 * host hypervisor can display it. */
		virtio_gpu_add_combinations(drv, render_target_formats,
					    ARRAY_SIZE(render_target_formats), &LINEAR_METADATA,
					    BO_USE_RENDER_MASK | BO_USE_SCANOUT);
		virtio_gpu_add_combinations(drv, texture_source_formats,
					    ARRAY_SIZE(texture_source_formats), &LINEAR_METADATA,
					    BO_USE_TEXTURE_MASK);
	} else {
		/* The virtio primary plane only allows this format. */
		virtio_gpu_add_combination(drv, DRM_FORMAT_XRGB8888, &LINEAR_METADATA,
					   BO_USE_RENDER_MASK | BO_USE_SCANOUT);
		/* The virtio cursor plane only allows this format, and Chrome cannot live
		 * without an ARGB8888 renderable format. */
		virtio_gpu_add_combination(drv, DRM_FORMAT_ARGB8888, &LINEAR_METADATA,
					   BO_USE_RENDER_MASK | BO_USE_CURSOR);
		/* Android needs more formats, but they can no longer be bound as scanouts
		 * after "drm/virtio: fix DRM_FORMAT_* handling". */
		virtio_gpu_add_combinations(drv, render_target_formats,
					    ARRAY_SIZE(render_target_formats), &LINEAR_METADATA,
					    BO_USE_RENDER_MASK);
		virtio_gpu_add_combinations(drv, dumb_texture_source_formats,
					    ARRAY_SIZE(dumb_texture_source_formats),
					    &LINEAR_METADATA, BO_USE_TEXTURE_MASK);
		virtio_gpu_add_combination(drv, DRM_FORMAT_NV12, &LINEAR_METADATA,
					   BO_USE_SW_MASK | BO_USE_LINEAR);
	}

	/* Android CTS tests require this. */
	virtio_gpu_add_combination(drv, DRM_FORMAT_BGR888, &LINEAR_METADATA, BO_USE_SW_MASK);

	drv_modify_combination(drv, DRM_FORMAT_NV12, &LINEAR_METADATA,
			       BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
				   BO_USE_HW_VIDEO_ENCODER);
	drv_modify_combination(drv, DRM_FORMAT_R8, &LINEAR_METADATA,
			       BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE);

	return drv_modify_linear_combinations(drv);
}

static void virtio_gpu_close(struct driver *drv)
{
	free(drv->priv);
	drv->priv = NULL;
}

static int virtio_gpu_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
				uint64_t use_flags)
{
	if (features[feat_3d].enabled)
		return virtio_virgl_bo_create(bo, width, height, format, use_flags);
	else
		return virtio_dumb_bo_create(bo, width, height, format, use_flags);
}

static int virtio_gpu_bo_destroy(struct bo *bo)
{
	if (features[feat_3d].enabled)
		return drv_gem_bo_destroy(bo);
	else
		return drv_dumb_bo_destroy(bo);
}

static void *virtio_gpu_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags)
{
	if (features[feat_3d].enabled)
		return virtio_virgl_bo_map(bo, vma, plane, map_flags);
	else
		return drv_dumb_bo_map(bo, vma, plane, map_flags);
}

static int virtio_gpu_bo_invalidate(struct bo *bo, struct mapping *mapping)
{
	int ret;
	struct drm_virtgpu_3d_transfer_from_host xfer;
	struct drm_virtgpu_3d_wait waitcmd;

	if (!features[feat_3d].enabled)
		return 0;

	// Invalidate is only necessary if the host writes to the buffer.
	if ((bo->meta.use_flags & (BO_USE_RENDERING | BO_USE_CAMERA_WRITE |
				   BO_USE_HW_VIDEO_ENCODER | BO_USE_HW_VIDEO_DECODER)) == 0)
		return 0;

	memset(&xfer, 0, sizeof(xfer));
	xfer.bo_handle = mapping->vma->handle;
	xfer.box.x = mapping->rect.x;
	xfer.box.y = mapping->rect.y;
	xfer.box.w = mapping->rect.width;
	xfer.box.h = mapping->rect.height;
	xfer.box.d = 1;

	if ((bo->meta.use_flags & BO_USE_RENDERING) == 0) {
		// Unfortunately, the kernel doesn't actually pass the guest layer_stride and
		// guest stride to the host (compare virtio_gpu.h and virtgpu_drm.h). For gbm
		// based resources, we can work around this by using the level field to pass
		// the stride to virglrenderer's gbm transfer code. However, we need to avoid
		// doing this for resources which don't rely on that transfer code, which is
		// resources with the BO_USE_RENDERING flag set.
		// TODO(b/145993887): Also send the stride once the patches have landed.
		xfer.level = bo->meta.strides[0];
	}

	ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST, &xfer);
	if (ret) {
		drv_log("DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST failed with %s\n", strerror(errno));
		return -errno;
	}

	// The transfer needs to complete before invalidate returns so that any host changes
	// are visible and to ensure the host doesn't overwrite subsequent guest changes.
	// TODO(b/136733358): Support returning fences from transfers
	memset(&waitcmd, 0, sizeof(waitcmd));
	waitcmd.handle = mapping->vma->handle;
	ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_WAIT, &waitcmd);
	if (ret) {
		drv_log("DRM_IOCTL_VIRTGPU_WAIT failed with %s\n", strerror(errno));
		return -errno;
	}

	return 0;
}
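
/*
 * Illustrative note (not in the original source): the level field is
 * repurposed as a side channel here. For a hypothetical 1280x720 XRGB8888
 * mapping with a 5120-byte stride (1280 pixels * 4 bytes), xfer.level = 5120
 * lets virglrenderer's gbm transfer path reconstruct the guest row pitch
 * that virtgpu_drm.h has no dedicated field for.
 */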

static int virtio_gpu_bo_flush(struct bo *bo, struct mapping *mapping)
{
	int ret;
	struct drm_virtgpu_3d_transfer_to_host xfer;
	struct drm_virtgpu_3d_wait waitcmd;

	if (!features[feat_3d].enabled)
		return 0;

	if (!(mapping->vma->map_flags & BO_MAP_WRITE))
		return 0;

	memset(&xfer, 0, sizeof(xfer));
	xfer.bo_handle = mapping->vma->handle;
	xfer.box.x = mapping->rect.x;
	xfer.box.y = mapping->rect.y;
	xfer.box.w = mapping->rect.width;
	xfer.box.h = mapping->rect.height;
	xfer.box.d = 1;

	// Unfortunately, the kernel doesn't actually pass the guest layer_stride and
	// guest stride to the host (compare virtio_gpu.h and virtgpu_drm.h). We can use
	// the level field to work around this.
	xfer.level = bo->meta.strides[0];

	ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST, &xfer);
	if (ret) {
		drv_log("DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST failed with %s\n", strerror(errno));
		return -errno;
	}

	// If the buffer is only accessed by the host GPU, then the flush is ordered
	// with subsequent commands. However, if other host hardware can access the
	// buffer, we need to wait for the transfer to complete for consistency.
	// TODO(b/136733358): Support returning fences from transfers
	if (bo->meta.use_flags & BO_USE_NON_GPU_HW) {
		memset(&waitcmd, 0, sizeof(waitcmd));
		waitcmd.handle = mapping->vma->handle;

		ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_WAIT, &waitcmd);
		if (ret) {
			drv_log("DRM_IOCTL_VIRTGPU_WAIT failed with %s\n", strerror(errno));
			return -errno;
		}
	}

	return 0;
}

static uint32_t virtio_gpu_resolve_format(struct driver *drv, uint32_t format, uint64_t use_flags)
{
	switch (format) {
	case DRM_FORMAT_FLEX_IMPLEMENTATION_DEFINED:
		/* Camera subsystem requires NV12. */
		if (use_flags & (BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE))
			return DRM_FORMAT_NV12;
		/* HACK: See b/28671744 */
		return DRM_FORMAT_XBGR8888;
	case DRM_FORMAT_FLEX_YCbCr_420_888:
		/*
		 * All of our host drivers prefer NV12 as their flexible media format.
		 * If that changes, this will need to be modified.
		 */
		if (features[feat_3d].enabled)
			return DRM_FORMAT_NV12;
		else
			return DRM_FORMAT_YVU420;
	default:
		return format;
	}
}

static int virtio_gpu_resource_info(struct bo *bo, uint32_t strides[DRV_MAX_PLANES],
				    uint32_t offsets[DRV_MAX_PLANES])
{
	int ret;
	struct drm_virtgpu_resource_info res_info;

	if (!features[feat_3d].enabled)
		return 0;

	memset(&res_info, 0, sizeof(res_info));
	res_info.bo_handle = bo->handles[0].u32;
	ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_RESOURCE_INFO, &res_info);
	if (ret) {
		drv_log("DRM_IOCTL_VIRTGPU_RESOURCE_INFO failed with %s\n", strerror(errno));
		return ret;
	}

	for (uint32_t plane = 0; plane < bo->meta.num_planes; plane++) {
		/*
		 * Currently, kernel v4.14 (Betty) doesn't have the extended resource info
		 * ioctl.
		 */
		if (res_info.strides[plane]) {
			strides[plane] = res_info.strides[plane];
			offsets[plane] = res_info.offsets[plane];
		}
	}

	return 0;
}
540
Lepton Wu249e8632018-04-05 12:50:03 -0700541const struct backend backend_virtio_gpu = {
Zach Reizner85c4c5f2017-10-04 13:15:57 -0700542 .name = "virtio_gpu",
543 .init = virtio_gpu_init,
Lepton Wu249e8632018-04-05 12:50:03 -0700544 .close = virtio_gpu_close,
Zach Reizner85c4c5f2017-10-04 13:15:57 -0700545 .bo_create = virtio_gpu_bo_create,
Lepton Wu249e8632018-04-05 12:50:03 -0700546 .bo_destroy = virtio_gpu_bo_destroy,
Zach Reizner85c4c5f2017-10-04 13:15:57 -0700547 .bo_import = drv_prime_bo_import,
Lepton Wu249e8632018-04-05 12:50:03 -0700548 .bo_map = virtio_gpu_bo_map,
Zach Reizner85c4c5f2017-10-04 13:15:57 -0700549 .bo_unmap = drv_bo_munmap,
550 .bo_invalidate = virtio_gpu_bo_invalidate,
551 .bo_flush = virtio_gpu_bo_flush,
552 .resolve_format = virtio_gpu_resolve_format,
Gurchetan Singhbc4f0232019-06-27 20:05:54 -0700553 .resource_info = virtio_gpu_resource_info,
Zach Reizner85c4c5f2017-10-04 13:15:57 -0700554};