/*
 * Copyright 2017 The Chromium OS Authors. All rights reserved.
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include <assert.h>
#include <errno.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <xf86drm.h>

#include "drv_priv.h"
#include "external/virgl_hw.h"
#include "external/virgl_protocol.h"
#include "external/virtgpu_drm.h"
#include "helpers.h"
#include "util.h"

#ifndef PAGE_SIZE
#define PAGE_SIZE 0x1000
#endif
#define PIPE_TEXTURE_2D 2

#define MESA_LLVMPIPE_TILE_ORDER 6
#define MESA_LLVMPIPE_TILE_SIZE (1 << MESA_LLVMPIPE_TILE_ORDER)

struct feature {
	uint64_t feature;
	const char *name;
	uint32_t enabled;
};

enum feature_id {
	feat_3d,
	feat_capset_fix,
	feat_resource_blob,
	feat_host_visible,
	feat_host_cross_device,
	feat_max,
};

#define FEATURE(x)                                                                                 \
	(struct feature)                                                                           \
	{                                                                                          \
		x, #x, 0                                                                           \
	}

static struct feature features[] = {
	FEATURE(VIRTGPU_PARAM_3D_FEATURES),   FEATURE(VIRTGPU_PARAM_CAPSET_QUERY_FIX),
	FEATURE(VIRTGPU_PARAM_RESOURCE_BLOB), FEATURE(VIRTGPU_PARAM_HOST_VISIBLE),
	FEATURE(VIRTGPU_PARAM_CROSS_DEVICE),
};

static const uint32_t render_target_formats[] = { DRM_FORMAT_ABGR8888, DRM_FORMAT_ARGB8888,
						  DRM_FORMAT_RGB565, DRM_FORMAT_XBGR8888,
						  DRM_FORMAT_XRGB8888 };

static const uint32_t dumb_texture_source_formats[] = {
	DRM_FORMAT_R8,   DRM_FORMAT_R16,  DRM_FORMAT_YVU420,
	DRM_FORMAT_NV12, DRM_FORMAT_NV21, DRM_FORMAT_YVU420_ANDROID
};

static const uint32_t texture_source_formats[] = { DRM_FORMAT_NV12, DRM_FORMAT_NV21,
						   DRM_FORMAT_R8,   DRM_FORMAT_R16,
						   DRM_FORMAT_RG88, DRM_FORMAT_YVU420_ANDROID };

struct virtio_gpu_priv {
	int caps_is_v2;
	union virgl_caps caps;
	int host_gbm_enabled;
	atomic_int next_blob_id;
};

static uint32_t translate_format(uint32_t drm_fourcc)
{
	switch (drm_fourcc) {
	case DRM_FORMAT_BGR888:
	case DRM_FORMAT_RGB888:
		return VIRGL_FORMAT_R8G8B8_UNORM;
	case DRM_FORMAT_XRGB8888:
		return VIRGL_FORMAT_B8G8R8X8_UNORM;
	case DRM_FORMAT_ARGB8888:
		return VIRGL_FORMAT_B8G8R8A8_UNORM;
	case DRM_FORMAT_XBGR8888:
		return VIRGL_FORMAT_R8G8B8X8_UNORM;
	case DRM_FORMAT_ABGR8888:
		return VIRGL_FORMAT_R8G8B8A8_UNORM;
	case DRM_FORMAT_ABGR16161616F:
		return VIRGL_FORMAT_R16G16B16A16_FLOAT;
	case DRM_FORMAT_RGB565:
		return VIRGL_FORMAT_B5G6R5_UNORM;
	case DRM_FORMAT_R8:
		return VIRGL_FORMAT_R8_UNORM;
	case DRM_FORMAT_RG88:
		return VIRGL_FORMAT_R8G8_UNORM;
	case DRM_FORMAT_NV12:
		return VIRGL_FORMAT_NV12;
	case DRM_FORMAT_NV21:
		return VIRGL_FORMAT_NV21;
	case DRM_FORMAT_YVU420:
	case DRM_FORMAT_YVU420_ANDROID:
		return VIRGL_FORMAT_YV12;
	default:
		return 0;
	}
}
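
// Checks a virgl format against one of the per-usage format bitmasks reported by the
// host. For example, a virgl format value of 67 lands in bitmask[2], bit 3, since
// 67 / 32 == 2 and 67 % 32 == 3.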
static bool virtio_gpu_bitmask_supports_format(struct virgl_supported_format_mask *supported,
					       uint32_t drm_format)
{
	uint32_t virgl_format = translate_format(drm_format);
	if (!virgl_format)
		return false;

	uint32_t bitmask_index = virgl_format / 32;
	uint32_t bit_index = virgl_format % 32;
	return supported->bitmask[bitmask_index] & (1 << bit_index);
}

// The metadata generated here for emulated buffers is slightly different from the metadata
// generated by drv_bo_from_format. In order to simplify transfers in the flush and invalidate
// functions below, the emulated buffers are oversized. For example (ignoring stride alignment
// requirements for the sake of demonstration), a 6x6 YUV420 image buffer might have the
// following layout from drv_bo_from_format:
//
// | Y | Y | Y | Y | Y | Y |
// | Y | Y | Y | Y | Y | Y |
// | Y | Y | Y | Y | Y | Y |
// | Y | Y | Y | Y | Y | Y |
// | Y | Y | Y | Y | Y | Y |
// | Y | Y | Y | Y | Y | Y |
// | U | U | U | U | U | U |
// | U | U | U | V | V | V |
// | V | V | V | V | V | V |
//
// where each plane immediately follows the previous plane in memory. This layout makes it
// difficult to compute the transfers needed, for example, when the middle 2x2 region of the
// image is locked and needs to be flushed/invalidated.
//
// Emulated multi-plane buffers instead have a layout of:
//
// | Y | Y | Y | Y | Y | Y |
// | Y | Y | Y | Y | Y | Y |
// | Y | Y | Y | Y | Y | Y |
// | Y | Y | Y | Y | Y | Y |
// | Y | Y | Y | Y | Y | Y |
// | Y | Y | Y | Y | Y | Y |
// | U | U | U |   |   |   |
// | U | U | U |   |   |   |
// | U | U | U |   |   |   |
// | V | V | V |   |   |   |
// | V | V | V |   |   |   |
// | V | V | V |   |   |   |
//
// where each plane is placed as a sub-image (albeit with a very large stride) in order to
// simplify transfers into 3 sub-image transfers for the above example.
//
// Additional note: the V-plane is not placed to the right of the U-plane due to some
// observed failures in media framework code which assumes the V-plane is not
// "row-interlaced" with the U-plane.
static void virtio_gpu_get_emulated_metadata(const struct bo *bo, struct bo_metadata *metadata)
{
	uint32_t y_plane_height;
	uint32_t c_plane_height;
	uint32_t original_width = bo->meta.width;
	uint32_t original_height = bo->meta.height;

	metadata->format = DRM_FORMAT_R8;
	switch (bo->meta.format) {
	case DRM_FORMAT_NV12:
	case DRM_FORMAT_NV21:
		// Bi-planar
		metadata->num_planes = 2;

		y_plane_height = original_height;
		c_plane_height = DIV_ROUND_UP(original_height, 2);

		metadata->width = original_width;
		metadata->height = y_plane_height + c_plane_height;

		// Y-plane (full resolution)
		metadata->strides[0] = metadata->width;
		metadata->offsets[0] = 0;
		metadata->sizes[0] = metadata->width * y_plane_height;

		// CbCr-plane (half resolution, interleaved, placed below Y-plane)
		metadata->strides[1] = metadata->width;
		metadata->offsets[1] = metadata->offsets[0] + metadata->sizes[0];
		metadata->sizes[1] = metadata->width * c_plane_height;

		metadata->total_size = metadata->width * metadata->height;
		break;
	case DRM_FORMAT_YVU420:
	case DRM_FORMAT_YVU420_ANDROID:
		// Tri-planar
		metadata->num_planes = 3;

		y_plane_height = original_height;
		c_plane_height = DIV_ROUND_UP(original_height, 2);

		metadata->width = ALIGN(original_width, 32);
		metadata->height = y_plane_height + (2 * c_plane_height);

		// Y-plane (full resolution)
		metadata->strides[0] = metadata->width;
		metadata->offsets[0] = 0;
		metadata->sizes[0] = metadata->width * original_height;

		// Cb-plane (half resolution, placed below Y-plane)
		metadata->strides[1] = metadata->width;
		metadata->offsets[1] = metadata->offsets[0] + metadata->sizes[0];
		metadata->sizes[1] = metadata->width * c_plane_height;

		// Cr-plane (half resolution, placed below Cb-plane)
		metadata->strides[2] = metadata->width;
		metadata->offsets[2] = metadata->offsets[1] + metadata->sizes[1];
		metadata->sizes[2] = metadata->width * c_plane_height;

		metadata->total_size = metadata->width * metadata->height;
		break;
	default:
		break;
	}
}
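
// Worked example (illustrative numbers only): a 640x480 NV12 buffer is emulated as a
// 640x720 R8 buffer. Plane 0 (Y) gets stride 640, offset 0, size 640 * 480 = 307200;
// plane 1 (interleaved CbCr) gets stride 640, offset 307200, size 640 * 240 = 153600;
// total_size is 640 * 720 = 460800 bytes.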

struct virtio_transfers_params {
	size_t xfers_needed;
	struct rectangle xfer_boxes[DRV_MAX_PLANES];
};

static void virtio_gpu_get_emulated_transfers_params(const struct bo *bo,
						      const struct rectangle *transfer_box,
						      struct virtio_transfers_params *xfer_params)
{
	uint32_t y_plane_height;
	uint32_t c_plane_height;
	struct bo_metadata emulated_metadata;

	if (transfer_box->x == 0 && transfer_box->y == 0 && transfer_box->width == bo->meta.width &&
	    transfer_box->height == bo->meta.height) {
		virtio_gpu_get_emulated_metadata(bo, &emulated_metadata);

		xfer_params->xfers_needed = 1;
		xfer_params->xfer_boxes[0].x = 0;
		xfer_params->xfer_boxes[0].y = 0;
		xfer_params->xfer_boxes[0].width = emulated_metadata.width;
		xfer_params->xfer_boxes[0].height = emulated_metadata.height;

		return;
	}

	switch (bo->meta.format) {
	case DRM_FORMAT_NV12:
	case DRM_FORMAT_NV21:
		// Bi-planar
		xfer_params->xfers_needed = 2;

		y_plane_height = bo->meta.height;
		c_plane_height = DIV_ROUND_UP(bo->meta.height, 2);

		// Y-plane (full resolution)
		xfer_params->xfer_boxes[0].x = transfer_box->x;
		xfer_params->xfer_boxes[0].y = transfer_box->y;
		xfer_params->xfer_boxes[0].width = transfer_box->width;
		xfer_params->xfer_boxes[0].height = transfer_box->height;

		// CbCr-plane (half resolution, interleaved, placed below Y-plane)
		xfer_params->xfer_boxes[1].x = transfer_box->x;
		xfer_params->xfer_boxes[1].y = transfer_box->y + y_plane_height;
		xfer_params->xfer_boxes[1].width = transfer_box->width;
		xfer_params->xfer_boxes[1].height = DIV_ROUND_UP(transfer_box->height, 2);

		break;
	case DRM_FORMAT_YVU420:
	case DRM_FORMAT_YVU420_ANDROID:
		// Tri-planar
		xfer_params->xfers_needed = 3;

		y_plane_height = bo->meta.height;
		c_plane_height = DIV_ROUND_UP(bo->meta.height, 2);

		// Y-plane (full resolution)
		xfer_params->xfer_boxes[0].x = transfer_box->x;
		xfer_params->xfer_boxes[0].y = transfer_box->y;
		xfer_params->xfer_boxes[0].width = transfer_box->width;
		xfer_params->xfer_boxes[0].height = transfer_box->height;

		// Cb-plane (half resolution, placed below Y-plane)
		xfer_params->xfer_boxes[1].x = transfer_box->x;
		xfer_params->xfer_boxes[1].y = transfer_box->y + y_plane_height;
		xfer_params->xfer_boxes[1].width = DIV_ROUND_UP(transfer_box->width, 2);
		xfer_params->xfer_boxes[1].height = DIV_ROUND_UP(transfer_box->height, 2);

		// Cr-plane (half resolution, placed below Cb-plane)
		xfer_params->xfer_boxes[2].x = transfer_box->x;
		xfer_params->xfer_boxes[2].y = transfer_box->y + y_plane_height + c_plane_height;
		xfer_params->xfer_boxes[2].width = DIV_ROUND_UP(transfer_box->width, 2);
		xfer_params->xfer_boxes[2].height = DIV_ROUND_UP(transfer_box->height, 2);

		break;
	}
}
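
// Worked example (illustrative numbers only): locking the middle 2x2 region of a 6x6
// YVU420 buffer, i.e. a transfer_box of (x=2, y=2, w=2, h=2), yields three boxes: a Y
// box at (2, 2, 2, 2), a Cb box at (2, 8, 1, 1) shifted below the 6-row Y-plane, and a
// Cr box at (2, 11, 1, 1) shifted below the 3-row Cb-plane.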

static bool virtio_gpu_supports_combination_natively(struct driver *drv, uint32_t drm_format,
						     uint64_t use_flags)
{
	struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)drv->priv;

	if (priv->caps.max_version == 0)
		return true;

	if ((use_flags & BO_USE_RENDERING) &&
	    !virtio_gpu_bitmask_supports_format(&priv->caps.v1.render, drm_format))
		return false;

	if ((use_flags & BO_USE_TEXTURE) &&
	    !virtio_gpu_bitmask_supports_format(&priv->caps.v1.sampler, drm_format))
		return false;

	if ((use_flags & BO_USE_SCANOUT) && priv->caps_is_v2 &&
	    !virtio_gpu_bitmask_supports_format(&priv->caps.v2.scanout, drm_format))
		return false;

	return true;
}

// For virtio backends that do not support formats natively (e.g. multi-planar formats are
// not supported in virglrenderer when gbm is unavailable on the host machine), this checks
// whether the format and usage combination can be handled as a blob (byte buffer).
static bool virtio_gpu_supports_combination_through_emulation(struct driver *drv,
							      uint32_t drm_format,
							      uint64_t use_flags)
{
	struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)drv->priv;

	// Only enable emulation on non-gbm virtio backends.
	if (priv->host_gbm_enabled)
		return false;

	if (use_flags & (BO_USE_RENDERING | BO_USE_SCANOUT))
		return false;

	if (!virtio_gpu_supports_combination_natively(drv, DRM_FORMAT_R8, use_flags))
		return false;

	return drm_format == DRM_FORMAT_NV12 || drm_format == DRM_FORMAT_NV21 ||
	       drm_format == DRM_FORMAT_YVU420 || drm_format == DRM_FORMAT_YVU420_ANDROID;
}

// Adds the given buffer combination to the list of supported buffer combinations if the
// combination is supported by the virtio backend.
static void virtio_gpu_add_combination(struct driver *drv, uint32_t drm_format,
				       struct format_metadata *metadata, uint64_t use_flags)
{
	struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)drv->priv;

	if (features[feat_3d].enabled && priv->caps.max_version >= 1) {
		if ((use_flags & BO_USE_SCANOUT) && priv->caps_is_v2 &&
		    !virtio_gpu_supports_combination_natively(drv, drm_format, use_flags)) {
			drv_log("Scanout format: %d\n", drm_format);
			use_flags &= ~BO_USE_SCANOUT;
		}

		if (!virtio_gpu_supports_combination_natively(drv, drm_format, use_flags) &&
		    !virtio_gpu_supports_combination_through_emulation(drv, drm_format,
								       use_flags)) {
			drv_log("Skipping unsupported combination format:%d\n", drm_format);
			return;
		}
	}

	drv_add_combination(drv, drm_format, metadata, use_flags);
}

// Adds each of the given buffer combinations to the list of supported buffer combinations
// if the combination is supported by the virtio backend.
static void virtio_gpu_add_combinations(struct driver *drv, const uint32_t *drm_formats,
					uint32_t num_formats, struct format_metadata *metadata,
					uint64_t use_flags)
{
	uint32_t i;

	for (i = 0; i < num_formats; i++)
		virtio_gpu_add_combination(drv, drm_formats[i], metadata, use_flags);
}

static int virtio_dumb_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
				 uint64_t use_flags)
{
	if (bo->meta.format != DRM_FORMAT_R8) {
		width = ALIGN(width, MESA_LLVMPIPE_TILE_SIZE);
		height = ALIGN(height, MESA_LLVMPIPE_TILE_SIZE);
	}

	return drv_dumb_bo_create_ex(bo, width, height, format, use_flags, BO_QUIRK_DUMB32BPP);
}

static inline void handle_flag(uint64_t *flag, uint64_t check_flag, uint32_t *bind,
			       uint32_t virgl_bind)
{
	if ((*flag) & check_flag) {
		(*flag) &= ~check_flag;
		(*bind) |= virgl_bind;
	}
}

static uint32_t compute_virgl_bind_flags(uint64_t use_flags, uint32_t format)
{
	/* In crosvm, VIRGL_BIND_SHARED means minigbm will allocate, not virglrenderer. */
	uint32_t bind = VIRGL_BIND_SHARED;

	handle_flag(&use_flags, BO_USE_TEXTURE, &bind, VIRGL_BIND_SAMPLER_VIEW);
	handle_flag(&use_flags, BO_USE_RENDERING, &bind, VIRGL_BIND_RENDER_TARGET);
	handle_flag(&use_flags, BO_USE_SCANOUT, &bind, VIRGL_BIND_SCANOUT);
	handle_flag(&use_flags, BO_USE_CURSOR, &bind, VIRGL_BIND_CURSOR);
	handle_flag(&use_flags, BO_USE_LINEAR, &bind, VIRGL_BIND_LINEAR);

	if (use_flags & BO_USE_PROTECTED) {
		handle_flag(&use_flags, BO_USE_PROTECTED, &bind, VIRGL_BIND_MINIGBM_PROTECTED);
	} else {
		// Make sure we don't set both flags, since that could be mistaken for
		// protected. Give OFTEN priority over RARELY.
		if (use_flags & BO_USE_SW_READ_OFTEN) {
			handle_flag(&use_flags, BO_USE_SW_READ_OFTEN, &bind,
				    VIRGL_BIND_MINIGBM_SW_READ_OFTEN);
		} else {
			handle_flag(&use_flags, BO_USE_SW_READ_RARELY, &bind,
				    VIRGL_BIND_MINIGBM_SW_READ_RARELY);
		}
		if (use_flags & BO_USE_SW_WRITE_OFTEN) {
			handle_flag(&use_flags, BO_USE_SW_WRITE_OFTEN, &bind,
				    VIRGL_BIND_MINIGBM_SW_WRITE_OFTEN);
		} else {
			handle_flag(&use_flags, BO_USE_SW_WRITE_RARELY, &bind,
				    VIRGL_BIND_MINIGBM_SW_WRITE_RARELY);
		}
	}

	handle_flag(&use_flags, BO_USE_CAMERA_WRITE, &bind, VIRGL_BIND_MINIGBM_CAMERA_WRITE);
	handle_flag(&use_flags, BO_USE_CAMERA_READ, &bind, VIRGL_BIND_MINIGBM_CAMERA_READ);
	handle_flag(&use_flags, BO_USE_HW_VIDEO_DECODER, &bind,
		    VIRGL_BIND_MINIGBM_HW_VIDEO_DECODER);
	handle_flag(&use_flags, BO_USE_HW_VIDEO_ENCODER, &bind,
		    VIRGL_BIND_MINIGBM_HW_VIDEO_ENCODER);

	/*
	 * HACK: This is for HAL_PIXEL_FORMAT_YV12 buffers allocated by arcvm. None of
	 * our platforms can display YV12, so we can treat it as a SW buffer. Remove once
	 * this can be intelligently resolved in the guest. Also see gbm_bo_create.
	 */
	if (format == DRM_FORMAT_YVU420_ANDROID)
		bind |= VIRGL_BIND_LINEAR;

	if (use_flags)
		drv_log("Unhandled bo use flag: %llx\n", (unsigned long long)use_flags);

	return bind;
}
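
// Example (illustrative only): use_flags == (BO_USE_TEXTURE | BO_USE_SW_READ_OFTEN)
// yields VIRGL_BIND_SHARED | VIRGL_BIND_SAMPLER_VIEW | VIRGL_BIND_MINIGBM_SW_READ_OFTEN,
// and since every flag was consumed, no "Unhandled bo use flag" warning is logged.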

static int virtio_virgl_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
				  uint64_t use_flags)
{
	int ret;
	size_t i;
	uint32_t stride;
	struct drm_virtgpu_resource_create res_create = { 0 };
	struct bo_metadata emulated_metadata;

	if (virtio_gpu_supports_combination_natively(bo->drv, format, use_flags)) {
		stride = drv_stride_from_format(format, width, 0);
		drv_bo_from_format(bo, stride, height, format);
	} else {
		assert(
		    virtio_gpu_supports_combination_through_emulation(bo->drv, format, use_flags));

		virtio_gpu_get_emulated_metadata(bo, &emulated_metadata);

		format = emulated_metadata.format;
		width = emulated_metadata.width;
		height = emulated_metadata.height;
		for (i = 0; i < emulated_metadata.num_planes; i++) {
			bo->meta.strides[i] = emulated_metadata.strides[i];
			bo->meta.offsets[i] = emulated_metadata.offsets[i];
			bo->meta.sizes[i] = emulated_metadata.sizes[i];
		}
		bo->meta.total_size = emulated_metadata.total_size;
	}

	/*
	 * Setting the target is intended to ensure this resource gets bound as a 2D
	 * texture in the host renderer's GL state. All of these resource properties are
	 * sent unchanged by the kernel to the host, which in turn sends them unchanged to
	 * virglrenderer. When virglrenderer makes a resource, it will convert the target
	 * enum to the equivalent one in GL and then bind the resource to that target.
	 */

	res_create.target = PIPE_TEXTURE_2D;
	res_create.format = translate_format(format);
	res_create.bind = compute_virgl_bind_flags(use_flags, format);
	res_create.width = width;
	res_create.height = height;

	/* For virgl 3D */
	res_create.depth = 1;
	res_create.array_size = 1;
	res_create.last_level = 0;
	res_create.nr_samples = 0;

	res_create.size = ALIGN(bo->meta.total_size, PAGE_SIZE); // PAGE_SIZE = 0x1000
	ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE, &res_create);
	if (ret) {
		drv_log("DRM_IOCTL_VIRTGPU_RESOURCE_CREATE failed with %s\n", strerror(errno));
		return ret;
	}

	for (uint32_t plane = 0; plane < bo->meta.num_planes; plane++)
		bo->handles[plane].u32 = res_create.bo_handle;

	return 0;
}

static void *virtio_virgl_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags)
{
	int ret;
	struct drm_virtgpu_map gem_map = { 0 };

	gem_map.handle = bo->handles[0].u32;
	ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_MAP, &gem_map);
	if (ret) {
		drv_log("DRM_IOCTL_VIRTGPU_MAP failed with %s\n", strerror(errno));
		return MAP_FAILED;
	}

	vma->length = bo->meta.total_size;
	return mmap(0, bo->meta.total_size, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd,
		    gem_map.offset);
}

static int virtio_gpu_get_caps(struct driver *drv, union virgl_caps *caps, int *caps_is_v2)
{
	int ret;
	struct drm_virtgpu_get_caps cap_args = { 0 };

	*caps_is_v2 = 0;
	cap_args.addr = (unsigned long long)caps;
	if (features[feat_capset_fix].enabled) {
		*caps_is_v2 = 1;
		cap_args.cap_set_id = 2;
		cap_args.size = sizeof(union virgl_caps);
	} else {
		cap_args.cap_set_id = 1;
		cap_args.size = sizeof(struct virgl_caps_v1);
	}

	ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_GET_CAPS, &cap_args);
	if (ret) {
		drv_log("DRM_IOCTL_VIRTGPU_GET_CAPS failed with %s\n", strerror(errno));
		*caps_is_v2 = 0;

		// Fallback to v1
		cap_args.cap_set_id = 1;
		cap_args.size = sizeof(struct virgl_caps_v1);

		ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_GET_CAPS, &cap_args);
		if (ret)
			drv_log("DRM_IOCTL_VIRTGPU_GET_CAPS failed with %s\n", strerror(errno));
	}

	return ret;
}

static void virtio_gpu_init_features_and_caps(struct driver *drv)
{
	struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)drv->priv;

	for (uint32_t i = 0; i < ARRAY_SIZE(features); i++) {
		struct drm_virtgpu_getparam params = { 0 };

		params.param = features[i].feature;
		params.value = (uint64_t)(uintptr_t)&features[i].enabled;
		int ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_GETPARAM, &params);
		if (ret)
			drv_log("DRM_IOCTL_VIRTGPU_GETPARAM failed with %s\n", strerror(errno));
	}

	if (features[feat_3d].enabled)
		virtio_gpu_get_caps(drv, &priv->caps, &priv->caps_is_v2);

	// Multi-planar formats are currently only supported in virglrenderer through gbm.
	priv->host_gbm_enabled =
	    features[feat_3d].enabled &&
	    virtio_gpu_supports_combination_natively(drv, DRM_FORMAT_NV12, BO_USE_TEXTURE);
}

static int virtio_gpu_init(struct driver *drv)
{
	struct virtio_gpu_priv *priv;

	priv = calloc(1, sizeof(*priv));
	if (!priv)
		return -ENOMEM;
	drv->priv = priv;

	virtio_gpu_init_features_and_caps(drv);

	if (features[feat_3d].enabled) {
		/* This doesn't mean the host can scan out everything, it just means the
		 * host hypervisor can show it. */
		virtio_gpu_add_combinations(drv, render_target_formats,
					    ARRAY_SIZE(render_target_formats), &LINEAR_METADATA,
					    BO_USE_RENDER_MASK | BO_USE_SCANOUT);
		virtio_gpu_add_combinations(drv, texture_source_formats,
					    ARRAY_SIZE(texture_source_formats), &LINEAR_METADATA,
					    BO_USE_TEXTURE_MASK);
	} else {
		/* The virtio primary plane only allows this format. */
		virtio_gpu_add_combination(drv, DRM_FORMAT_XRGB8888, &LINEAR_METADATA,
					   BO_USE_RENDER_MASK | BO_USE_SCANOUT);
		/* The virtio cursor plane only allows this format, and Chrome cannot live
		 * without an ARGB8888 renderable format. */
		virtio_gpu_add_combination(drv, DRM_FORMAT_ARGB8888, &LINEAR_METADATA,
					   BO_USE_RENDER_MASK | BO_USE_CURSOR);
		/* Android needs more formats, but they cannot be bound as scanouts anymore
		 * after "drm/virtio: fix DRM_FORMAT_* handling". */
		virtio_gpu_add_combinations(drv, render_target_formats,
					    ARRAY_SIZE(render_target_formats), &LINEAR_METADATA,
					    BO_USE_RENDER_MASK);
		virtio_gpu_add_combinations(drv, dumb_texture_source_formats,
					    ARRAY_SIZE(dumb_texture_source_formats),
					    &LINEAR_METADATA, BO_USE_TEXTURE_MASK);
		virtio_gpu_add_combination(drv, DRM_FORMAT_NV12, &LINEAR_METADATA,
					   BO_USE_SW_MASK | BO_USE_LINEAR);
		virtio_gpu_add_combination(drv, DRM_FORMAT_NV21, &LINEAR_METADATA,
					   BO_USE_SW_MASK | BO_USE_LINEAR);
	}

	/* Android CTS tests require this. */
	virtio_gpu_add_combination(drv, DRM_FORMAT_RGB888, &LINEAR_METADATA, BO_USE_SW_MASK);
	virtio_gpu_add_combination(drv, DRM_FORMAT_BGR888, &LINEAR_METADATA, BO_USE_SW_MASK);
	virtio_gpu_add_combination(drv, DRM_FORMAT_ABGR16161616F, &LINEAR_METADATA,
				   BO_USE_SW_MASK | BO_USE_TEXTURE_MASK);

	drv_modify_combination(drv, DRM_FORMAT_NV12, &LINEAR_METADATA,
			       BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
				   BO_USE_HW_VIDEO_ENCODER);
	drv_modify_combination(drv, DRM_FORMAT_R8, &LINEAR_METADATA,
			       BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
				   BO_USE_HW_VIDEO_ENCODER);

	if (!priv->host_gbm_enabled) {
		drv_modify_combination(drv, DRM_FORMAT_ABGR8888, &LINEAR_METADATA,
				       BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE |
					   BO_USE_HW_VIDEO_DECODER | BO_USE_HW_VIDEO_ENCODER);
		drv_modify_combination(drv, DRM_FORMAT_XBGR8888, &LINEAR_METADATA,
				       BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE |
					   BO_USE_HW_VIDEO_DECODER | BO_USE_HW_VIDEO_ENCODER);
		drv_modify_combination(drv, DRM_FORMAT_NV21, &LINEAR_METADATA,
				       BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE |
					   BO_USE_HW_VIDEO_DECODER | BO_USE_HW_VIDEO_ENCODER);
		drv_modify_combination(drv, DRM_FORMAT_R16, &LINEAR_METADATA,
				       BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE |
					   BO_USE_HW_VIDEO_DECODER);
		drv_modify_combination(drv, DRM_FORMAT_YVU420, &LINEAR_METADATA,
				       BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE |
					   BO_USE_HW_VIDEO_DECODER | BO_USE_HW_VIDEO_ENCODER);
		drv_modify_combination(drv, DRM_FORMAT_YVU420_ANDROID, &LINEAR_METADATA,
				       BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE |
					   BO_USE_HW_VIDEO_DECODER | BO_USE_HW_VIDEO_ENCODER);
	}

	return drv_modify_linear_combinations(drv);
}

static void virtio_gpu_close(struct driver *drv)
{
	free(drv->priv);
	drv->priv = NULL;
}

static int virtio_gpu_bo_create_blob(struct driver *drv, struct bo *bo)
{
	int ret;
	uint32_t stride;
	uint32_t cur_blob_id;
	uint32_t cmd[VIRGL_PIPE_RES_CREATE_SIZE + 1] = { 0 };
	struct drm_virtgpu_resource_create_blob drm_rc_blob = { 0 };
	struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)drv->priv;

	uint32_t blob_flags = VIRTGPU_BLOB_FLAG_USE_SHAREABLE;
	if (bo->meta.use_flags & BO_USE_SW_MASK)
		blob_flags |= VIRTGPU_BLOB_FLAG_USE_MAPPABLE;
	if (bo->meta.use_flags & BO_USE_NON_GPU_HW)
		blob_flags |= VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE;

	cur_blob_id = atomic_fetch_add(&priv->next_blob_id, 1);
	stride = drv_stride_from_format(bo->meta.format, bo->meta.width, 0);
	drv_bo_from_format(bo, stride, bo->meta.height, bo->meta.format);
	bo->meta.total_size = ALIGN(bo->meta.total_size, PAGE_SIZE);
	bo->meta.tiling = blob_flags;
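
	/*
	 * Note: cmd[] below is a virgl command stream: one header dword built by
	 * VIRGL_CMD0(), followed by VIRGL_PIPE_RES_CREATE_SIZE payload dwords describing
	 * the resource. The host executes this command while handling the ioctl and ties
	 * the resulting host resource to this guest blob via the shared blob_id.
	 */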
	cmd[0] = VIRGL_CMD0(VIRGL_CCMD_PIPE_RESOURCE_CREATE, 0, VIRGL_PIPE_RES_CREATE_SIZE);
	cmd[VIRGL_PIPE_RES_CREATE_TARGET] = PIPE_TEXTURE_2D;
	cmd[VIRGL_PIPE_RES_CREATE_WIDTH] = bo->meta.width;
	cmd[VIRGL_PIPE_RES_CREATE_HEIGHT] = bo->meta.height;
	cmd[VIRGL_PIPE_RES_CREATE_FORMAT] = translate_format(bo->meta.format);
	cmd[VIRGL_PIPE_RES_CREATE_BIND] =
	    compute_virgl_bind_flags(bo->meta.use_flags, bo->meta.format);
	cmd[VIRGL_PIPE_RES_CREATE_DEPTH] = 1;
	cmd[VIRGL_PIPE_RES_CREATE_BLOB_ID] = cur_blob_id;

	drm_rc_blob.cmd = (uint64_t)&cmd;
	drm_rc_blob.cmd_size = 4 * (VIRGL_PIPE_RES_CREATE_SIZE + 1);
	drm_rc_blob.size = bo->meta.total_size;
	drm_rc_blob.blob_mem = VIRTGPU_BLOB_MEM_HOST3D;
	drm_rc_blob.blob_flags = blob_flags;
	drm_rc_blob.blob_id = cur_blob_id;

	ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB, &drm_rc_blob);
	if (ret < 0) {
		drv_log("DRM_VIRTGPU_RESOURCE_CREATE_BLOB failed with %s\n", strerror(errno));
		return -errno;
	}

	for (uint32_t plane = 0; plane < bo->meta.num_planes; plane++)
		bo->handles[plane].u32 = drm_rc_blob.bo_handle;

	return 0;
}

static bool should_use_blob(struct driver *drv, uint32_t format, uint64_t use_flags)
{
	struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)drv->priv;

	// TODO(gurchetansingh): remove once all minigbm users are blob-safe
#ifndef VIRTIO_GPU_NEXT
	return false;
#endif

	// Only use blob when host gbm is available
	if (!priv->host_gbm_enabled)
		return false;

	// Use regular resources if only the GPU needs efficient access
	if (!(use_flags &
	      (BO_USE_SW_READ_OFTEN | BO_USE_SW_WRITE_OFTEN | BO_USE_LINEAR | BO_USE_NON_GPU_HW)))
		return false;

	switch (format) {
	case DRM_FORMAT_YVU420_ANDROID:
	case DRM_FORMAT_R8:
		// Formats with strictly defined strides are supported
		return true;
	case DRM_FORMAT_NV12:
		// Knowing buffer metadata at buffer creation isn't yet supported, so buffers
		// can't be properly mapped into the guest.
		return (use_flags & BO_USE_SW_MASK) == 0;
	default:
		return false;
	}
}
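
// Example (illustrative only, and assuming VIRTIO_GPU_NEXT is defined): with host gbm
// enabled, a DRM_FORMAT_R8 buffer with BO_USE_SW_WRITE_OFTEN takes the blob path, while a
// DRM_FORMAT_NV12 buffer with the same flags does not, since an NV12 blob's layout isn't
// communicated at creation time and so can't be safely mapped by the guest.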

static int virtio_gpu_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
				uint64_t use_flags)
{
	if (features[feat_resource_blob].enabled && features[feat_host_visible].enabled &&
	    should_use_blob(bo->drv, format, use_flags))
		return virtio_gpu_bo_create_blob(bo->drv, bo);

	if (features[feat_3d].enabled)
		return virtio_virgl_bo_create(bo, width, height, format, use_flags);
	else
		return virtio_dumb_bo_create(bo, width, height, format, use_flags);
}

static int virtio_gpu_bo_destroy(struct bo *bo)
{
	if (features[feat_3d].enabled)
		return drv_gem_bo_destroy(bo);
	else
		return drv_dumb_bo_destroy(bo);
}

static void *virtio_gpu_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags)
{
	if (features[feat_3d].enabled)
		return virtio_virgl_bo_map(bo, vma, plane, map_flags);
	else
		return drv_dumb_bo_map(bo, vma, plane, map_flags);
}

static int virtio_gpu_bo_invalidate(struct bo *bo, struct mapping *mapping)
{
	int ret;
	size_t i;
	struct drm_virtgpu_3d_transfer_from_host xfer = { 0 };
	struct drm_virtgpu_3d_wait waitcmd = { 0 };
	struct virtio_transfers_params xfer_params;
	struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)bo->drv->priv;
	uint64_t host_write_flags;

	if (!features[feat_3d].enabled)
		return 0;

	// Invalidate is only necessary if the host writes to the buffer. The encoder and
	// decoder flags don't differentiate between input and output buffers, but we can
	// use the format to determine whether this buffer could be encoder/decoder output.
	host_write_flags = BO_USE_RENDERING | BO_USE_CAMERA_WRITE;
	if (bo->meta.format == DRM_FORMAT_R8)
		host_write_flags |= BO_USE_HW_VIDEO_ENCODER;
	else
		host_write_flags |= BO_USE_HW_VIDEO_DECODER;

	if ((bo->meta.use_flags & host_write_flags) == 0)
		return 0;

	if (features[feat_resource_blob].enabled &&
	    (bo->meta.tiling & VIRTGPU_BLOB_FLAG_USE_MAPPABLE))
		return 0;

	xfer.bo_handle = mapping->vma->handle;

	if (mapping->rect.x || mapping->rect.y) {
		/*
		 * virglrenderer uses the box parameters and assumes that offset == 0 for planar
		 * images
		 */
		if (bo->meta.num_planes == 1) {
			xfer.offset =
			    (bo->meta.strides[0] * mapping->rect.y) +
			    drv_bytes_per_pixel_from_format(bo->meta.format, 0) * mapping->rect.x;
		}
	}

	if ((bo->meta.use_flags & BO_USE_RENDERING) == 0) {
		// Unfortunately, the kernel doesn't actually pass the guest layer_stride
		// and guest stride to the host (compare virtio_gpu.h and virtgpu_drm.h).
		// For gbm based resources, we can work around this by using the level field
		// to pass the stride to virglrenderer's gbm transfer code. However, we need
		// to avoid doing this for resources which don't rely on that transfer code,
		// which is resources with the BO_USE_RENDERING flag set.
		// TODO(b/145993887): Also send the stride once the patches land
		if (priv->host_gbm_enabled)
			xfer.level = bo->meta.strides[0];
	}

	if (virtio_gpu_supports_combination_natively(bo->drv, bo->meta.format,
						     bo->meta.use_flags)) {
		xfer_params.xfers_needed = 1;
		xfer_params.xfer_boxes[0] = mapping->rect;
	} else {
		assert(virtio_gpu_supports_combination_through_emulation(bo->drv, bo->meta.format,
									  bo->meta.use_flags));

		virtio_gpu_get_emulated_transfers_params(bo, &mapping->rect, &xfer_params);
	}

	for (i = 0; i < xfer_params.xfers_needed; i++) {
		xfer.box.x = xfer_params.xfer_boxes[i].x;
		xfer.box.y = xfer_params.xfer_boxes[i].y;
		xfer.box.w = xfer_params.xfer_boxes[i].width;
		xfer.box.h = xfer_params.xfer_boxes[i].height;
		xfer.box.d = 1;

		ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST, &xfer);
		if (ret) {
			drv_log("DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST failed with %s\n",
				strerror(errno));
			return -errno;
		}
	}

	// The transfer needs to complete before invalidate returns so that any host changes
	// are visible and to ensure the host doesn't overwrite subsequent guest changes.
	// TODO(b/136733358): Support returning fences from transfers
	waitcmd.handle = mapping->vma->handle;
	ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_WAIT, &waitcmd);
	if (ret) {
		drv_log("DRM_IOCTL_VIRTGPU_WAIT failed with %s\n", strerror(errno));
		return -errno;
	}

	return 0;
}

static int virtio_gpu_bo_flush(struct bo *bo, struct mapping *mapping)
{
	int ret;
	size_t i;
	struct drm_virtgpu_3d_transfer_to_host xfer = { 0 };
	struct drm_virtgpu_3d_wait waitcmd = { 0 };
	struct virtio_transfers_params xfer_params;
	struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)bo->drv->priv;

	if (!features[feat_3d].enabled)
		return 0;

	if (!(mapping->vma->map_flags & BO_MAP_WRITE))
		return 0;

	if (features[feat_resource_blob].enabled &&
	    (bo->meta.tiling & VIRTGPU_BLOB_FLAG_USE_MAPPABLE))
		return 0;

	xfer.bo_handle = mapping->vma->handle;

	if (mapping->rect.x || mapping->rect.y) {
		/*
		 * virglrenderer uses the box parameters and assumes that offset == 0 for planar
		 * images
		 */
		if (bo->meta.num_planes == 1) {
			xfer.offset =
			    (bo->meta.strides[0] * mapping->rect.y) +
			    drv_bytes_per_pixel_from_format(bo->meta.format, 0) * mapping->rect.x;
		}
	}

	// Unfortunately, the kernel doesn't actually pass the guest layer_stride and
	// guest stride to the host (compare virtio_gpu.h and virtgpu_drm.h). We can use
	// the level to work around this.
	if (priv->host_gbm_enabled)
		xfer.level = bo->meta.strides[0];

	if (virtio_gpu_supports_combination_natively(bo->drv, bo->meta.format,
						     bo->meta.use_flags)) {
		xfer_params.xfers_needed = 1;
		xfer_params.xfer_boxes[0] = mapping->rect;
	} else {
		assert(virtio_gpu_supports_combination_through_emulation(bo->drv, bo->meta.format,
									  bo->meta.use_flags));

		virtio_gpu_get_emulated_transfers_params(bo, &mapping->rect, &xfer_params);
	}

	for (i = 0; i < xfer_params.xfers_needed; i++) {
		xfer.box.x = xfer_params.xfer_boxes[i].x;
		xfer.box.y = xfer_params.xfer_boxes[i].y;
		xfer.box.w = xfer_params.xfer_boxes[i].width;
		xfer.box.h = xfer_params.xfer_boxes[i].height;
		xfer.box.d = 1;

		ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST, &xfer);
		if (ret) {
			drv_log("DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST failed with %s\n",
				strerror(errno));
			return -errno;
		}
	}

	// If the buffer is only accessed by the host GPU, then the flush is ordered
	// with subsequent commands. However, if other host hardware can access the
	// buffer, we need to wait for the transfer to complete for consistency.
	// TODO(b/136733358): Support returning fences from transfers
	if (bo->meta.use_flags & BO_USE_NON_GPU_HW) {
		waitcmd.handle = mapping->vma->handle;

		ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_WAIT, &waitcmd);
		if (ret) {
			drv_log("DRM_IOCTL_VIRTGPU_WAIT failed with %s\n", strerror(errno));
			return -errno;
		}
	}

	return 0;
}

static uint32_t virtio_gpu_resolve_format(struct driver *drv, uint32_t format, uint64_t use_flags)
{
	switch (format) {
	case DRM_FORMAT_FLEX_IMPLEMENTATION_DEFINED:
		/* Camera subsystem requires NV12. */
		if (use_flags & (BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE))
			return DRM_FORMAT_NV12;
		/* HACK: See b/28671744 */
		return DRM_FORMAT_XBGR8888;
	case DRM_FORMAT_FLEX_YCbCr_420_888:
		/*
		 * All of our host drivers prefer NV12 as their flexible media format.
		 * If that changes, this will need to be modified.
		 */
		if (features[feat_3d].enabled)
			return DRM_FORMAT_NV12;
		else
			return DRM_FORMAT_YVU420_ANDROID;
	default:
		return format;
	}
}

static int virtio_gpu_resource_info(struct bo *bo, uint32_t strides[DRV_MAX_PLANES],
				    uint32_t offsets[DRV_MAX_PLANES])
{
	int ret;
	struct drm_virtgpu_resource_info_cros res_info = { 0 };

	if (!features[feat_3d].enabled)
		return 0;

	res_info.bo_handle = bo->handles[0].u32;
	res_info.type = VIRTGPU_RESOURCE_INFO_TYPE_EXTENDED;
	ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_RESOURCE_INFO_CROS, &res_info);
	if (ret) {
		drv_log("DRM_IOCTL_VIRTGPU_RESOURCE_INFO_CROS failed with %s\n", strerror(errno));
		return ret;
	}

	for (uint32_t plane = 0; plane < bo->meta.num_planes; plane++) {
		/*
		 * Currently, kernel v4.14 (Betty) doesn't have the extended resource info
		 * ioctl.
		 */
		if (res_info.strides[plane]) {
			strides[plane] = res_info.strides[plane];
			offsets[plane] = res_info.offsets[plane];
		}
	}

	return 0;
}

const struct backend backend_virtio_gpu = {
	.name = "virtio_gpu",
	.init = virtio_gpu_init,
	.close = virtio_gpu_close,
	.bo_create = virtio_gpu_bo_create,
	.bo_destroy = virtio_gpu_bo_destroy,
	.bo_import = drv_prime_bo_import,
	.bo_map = virtio_gpu_bo_map,
	.bo_unmap = drv_bo_munmap,
	.bo_invalidate = virtio_gpu_bo_invalidate,
	.bo_flush = virtio_gpu_bo_flush,
	.resolve_format = virtio_gpu_resolve_format,
	.resource_info = virtio_gpu_resource_info,
};