/*
 * Copyright 2017 The Chromium OS Authors. All rights reserved.
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <xf86drm.h>

#include "drv_priv.h"
#include "helpers.h"
#include "util.h"
#include "virgl_hw.h"
#include "virtgpu_drm.h"

#ifndef PAGE_SIZE
#define PAGE_SIZE 0x1000
#endif
#define PIPE_TEXTURE_2D 2

#define MESA_LLVMPIPE_TILE_ORDER 6
#define MESA_LLVMPIPE_TILE_SIZE (1 << MESA_LLVMPIPE_TILE_ORDER)

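/*
 * Optional virtgpu kernel features probed at init time. virtio_gpu_init()
 * queries each entry with DRM_IOCTL_VIRTGPU_GETPARAM and records the result
 * in 'enabled'; the rest of the driver keys the 3D (virgl) and capset-v2
 * behavior off this table.
 */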
struct feature {
	uint64_t feature;
	const char *name;
	uint32_t enabled;
};

enum feature_id {
	feat_3d,
	feat_capset_fix,
	feat_max,
};

#define FEATURE(x)                                                                                 \
	(struct feature)                                                                           \
	{                                                                                          \
		x, #x, 0                                                                           \
	}
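/*
 * For reference, FEATURE(VIRTGPU_PARAM_3D_FEATURES) expands to the compound
 * literal (struct feature){ VIRTGPU_PARAM_3D_FEATURES, "VIRTGPU_PARAM_3D_FEATURES", 0 },
 * i.e. the parameter id, its stringified name, and enabled = 0 until probed.
 */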

static struct feature features[] = { FEATURE(VIRTGPU_PARAM_3D_FEATURES),
				      FEATURE(VIRTGPU_PARAM_CAPSET_QUERY_FIX) };

static const uint32_t render_target_formats[] = { DRM_FORMAT_ABGR8888, DRM_FORMAT_ARGB8888,
						   DRM_FORMAT_RGB565, DRM_FORMAT_XBGR8888,
						   DRM_FORMAT_XRGB8888 };

static const uint32_t dumb_texture_source_formats[] = { DRM_FORMAT_R8, DRM_FORMAT_YVU420,
							 DRM_FORMAT_NV12,
							 DRM_FORMAT_YVU420_ANDROID };

static const uint32_t texture_source_formats[] = { DRM_FORMAT_NV12, DRM_FORMAT_R8, DRM_FORMAT_RG88,
						    DRM_FORMAT_YVU420_ANDROID };

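/*
 * Per-driver private state: the virgl capabilities fetched at init time and
 * whether they came from the v2 capset (which is what carries the scanout
 * format mask consulted below).
 */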
struct virtio_gpu_priv {
	int caps_is_v2;
	union virgl_caps caps;
};

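/* Maps a DRM fourcc to the corresponding virgl format, or 0 if unsupported. */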
static uint32_t translate_format(uint32_t drm_fourcc)
{
	switch (drm_fourcc) {
	case DRM_FORMAT_XRGB8888:
		return VIRGL_FORMAT_B8G8R8X8_UNORM;
	case DRM_FORMAT_ARGB8888:
		return VIRGL_FORMAT_B8G8R8A8_UNORM;
	case DRM_FORMAT_XBGR8888:
		return VIRGL_FORMAT_R8G8B8X8_UNORM;
	case DRM_FORMAT_ABGR8888:
		return VIRGL_FORMAT_R8G8B8A8_UNORM;
	case DRM_FORMAT_RGB565:
		return VIRGL_FORMAT_B5G6R5_UNORM;
	case DRM_FORMAT_R8:
		return VIRGL_FORMAT_R8_UNORM;
	case DRM_FORMAT_RG88:
		return VIRGL_FORMAT_R8G8_UNORM;
	case DRM_FORMAT_NV12:
		return VIRGL_FORMAT_NV12;
	case DRM_FORMAT_YVU420:
	case DRM_FORMAT_YVU420_ANDROID:
		return VIRGL_FORMAT_YV12;
	default:
		return 0;
	}
}

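/*
 * Checks a format against a virgl capability mask. Each virgl format is one
 * bit in the mask: bit (format % 32) of word (format / 32).
 */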
static bool virtio_gpu_supports_format(struct virgl_supported_format_mask *supported,
				       uint32_t drm_format)
{
	uint32_t virgl_format = translate_format(drm_format);
	if (!virgl_format) {
		return false;
	}

	uint32_t bitmask_index = virgl_format / 32;
	uint32_t bit_index = virgl_format % 32;
	return supported->bitmask[bitmask_index] & (1 << bit_index);
}

// Adds the given buffer combination to the list of supported buffer combinations if the
// combination is supported by the virtio backend.
static void virtio_gpu_add_combination(struct driver *drv, uint32_t drm_format,
				       struct format_metadata *metadata, uint64_t use_flags)
{
	struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)drv->priv;

	if (features[feat_3d].enabled && priv->caps.max_version >= 1) {
		if ((use_flags & BO_USE_RENDERING) &&
		    !virtio_gpu_supports_format(&priv->caps.v1.render, drm_format)) {
			drv_log("Skipping unsupported render format: %d\n", drm_format);
			return;
		}

		if ((use_flags & BO_USE_TEXTURE) &&
		    !virtio_gpu_supports_format(&priv->caps.v1.sampler, drm_format)) {
			drv_log("Skipping unsupported texture format: %d\n", drm_format);
			return;
		}
		if ((use_flags & BO_USE_SCANOUT) && priv->caps_is_v2 &&
		    !virtio_gpu_supports_format(&priv->caps.v2.scanout, drm_format)) {
			drv_log("Unsupported scanout format: %d\n", drm_format);
			use_flags &= ~BO_USE_SCANOUT;
		}
	}

	drv_add_combination(drv, drm_format, metadata, use_flags);
}

// Adds each given buffer combination to the list of supported buffer combinations if the
// combination is supported by the virtio backend.
static void virtio_gpu_add_combinations(struct driver *drv, const uint32_t *drm_formats,
					uint32_t num_formats, struct format_metadata *metadata,
					uint64_t use_flags)
{
	uint32_t i;

	for (i = 0; i < num_formats; i++) {
		virtio_gpu_add_combination(drv, drm_formats[i], metadata, use_flags);
	}
}

static int virtio_dumb_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
				 uint64_t use_flags)
{
	if (bo->meta.format != DRM_FORMAT_R8) {
		width = ALIGN(width, MESA_LLVMPIPE_TILE_SIZE);
		height = ALIGN(height, MESA_LLVMPIPE_TILE_SIZE);
	}

	return drv_dumb_bo_create_ex(bo, width, height, format, use_flags, BO_QUIRK_DUMB32BPP);
}

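/* If check_flag is set in *flag, clears it and ORs virgl_bind into *bind. */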
static inline void handle_flag(uint64_t *flag, uint64_t check_flag, uint32_t *bind,
			       uint32_t virgl_bind)
{
	if ((*flag) & check_flag) {
		(*flag) &= ~check_flag;
		(*bind) |= virgl_bind;
	}
}

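/*
 * Translates minigbm BO_USE_* flags into a virgl bind mask. For example, a
 * BO_USE_TEXTURE | BO_USE_SCANOUT allocation yields
 * VIRGL_BIND_SHARED | VIRGL_BIND_SAMPLER_VIEW | VIRGL_BIND_SCANOUT.
 */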
static uint32_t use_flags_to_bind(uint64_t use_flags)
{
	/* In crosvm, VIRGL_BIND_SHARED means minigbm will allocate, not virglrenderer. */
	uint32_t bind = VIRGL_BIND_SHARED;

	handle_flag(&use_flags, BO_USE_TEXTURE, &bind, VIRGL_BIND_SAMPLER_VIEW);
	handle_flag(&use_flags, BO_USE_RENDERING, &bind, VIRGL_BIND_RENDER_TARGET);
	handle_flag(&use_flags, BO_USE_SCANOUT, &bind, VIRGL_BIND_SCANOUT);
	handle_flag(&use_flags, BO_USE_CURSOR, &bind, VIRGL_BIND_CURSOR);
	handle_flag(&use_flags, BO_USE_LINEAR, &bind, VIRGL_BIND_LINEAR);

	handle_flag(&use_flags, BO_USE_SW_READ_OFTEN, &bind, VIRGL_BIND_LINEAR);
	handle_flag(&use_flags, BO_USE_SW_READ_RARELY, &bind, VIRGL_BIND_LINEAR);
	handle_flag(&use_flags, BO_USE_SW_WRITE_OFTEN, &bind, VIRGL_BIND_LINEAR);
	handle_flag(&use_flags, BO_USE_SW_WRITE_RARELY, &bind, VIRGL_BIND_LINEAR);

	// All host drivers only support linear camera buffer formats. If
	// that changes, this will need to be modified.
	handle_flag(&use_flags, BO_USE_CAMERA_READ, &bind, VIRGL_BIND_LINEAR);
	handle_flag(&use_flags, BO_USE_CAMERA_WRITE, &bind, VIRGL_BIND_LINEAR);

	if (use_flags) {
		drv_log("Unhandled bo use flag: %llx\n", (unsigned long long)use_flags);
	}

	return bind;
}

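/*
 * 3D (virgl) allocation path: computes the guest layout with
 * drv_bo_from_format(), then creates a host-side resource via
 * DRM_IOCTL_VIRTGPU_RESOURCE_CREATE. All planes share the one GEM handle.
 */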
static int virtio_virgl_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
				  uint64_t use_flags)
{
	int ret;
	uint32_t stride;
	struct drm_virtgpu_resource_create res_create;

	stride = drv_stride_from_format(format, width, 0);
	drv_bo_from_format(bo, stride, height, format);

	/*
	 * Setting the target is intended to ensure this resource gets bound as a 2D
	 * texture in the host renderer's GL state. All of these resource properties are
	 * sent unchanged by the kernel to the host, which in turn sends them unchanged to
	 * virglrenderer. When virglrenderer makes a resource, it will convert the target
	 * enum to the equivalent one in GL and then bind the resource to that target.
	 */
	memset(&res_create, 0, sizeof(res_create));

	res_create.target = PIPE_TEXTURE_2D;
	res_create.format = translate_format(format);
	res_create.bind = use_flags_to_bind(use_flags);
	res_create.width = width;
	res_create.height = height;

	/* For virgl 3D */
	res_create.depth = 1;
	res_create.array_size = 1;
	res_create.last_level = 0;
	res_create.nr_samples = 0;

	res_create.size = ALIGN(bo->meta.total_size, PAGE_SIZE); // PAGE_SIZE = 0x1000
	ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE, &res_create);
	if (ret) {
		drv_log("DRM_IOCTL_VIRTGPU_RESOURCE_CREATE failed with %s\n", strerror(errno));
		return ret;
	}

	for (uint32_t plane = 0; plane < bo->meta.num_planes; plane++)
		bo->handles[plane].u32 = res_create.bo_handle;

	return 0;
}

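/*
 * 3D (virgl) mapping path: asks the kernel for the buffer's mmap offset via
 * DRM_IOCTL_VIRTGPU_MAP, then maps the whole resource through the DRM fd.
 */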
static void *virtio_virgl_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags)
{
	int ret;
	struct drm_virtgpu_map gem_map;

	memset(&gem_map, 0, sizeof(gem_map));
	gem_map.handle = bo->handles[0].u32;

	ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_MAP, &gem_map);
	if (ret) {
		drv_log("DRM_IOCTL_VIRTGPU_MAP failed with %s\n", strerror(errno));
		return MAP_FAILED;
	}

	vma->length = bo->meta.total_size;
	return mmap(0, bo->meta.total_size, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd,
		    gem_map.offset);
}

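/*
 * Fetches the virgl capability set. When the CAPSET_QUERY_FIX feature is
 * available, the v2 capset is requested; on failure this falls back to v1.
 */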
static int virtio_gpu_get_caps(struct driver *drv, union virgl_caps *caps, int *caps_is_v2)
{
	int ret;
	struct drm_virtgpu_get_caps cap_args;

	*caps_is_v2 = 0;
	memset(&cap_args, 0, sizeof(cap_args));
	cap_args.addr = (unsigned long long)caps;
	if (features[feat_capset_fix].enabled) {
		*caps_is_v2 = 1;
		cap_args.cap_set_id = 2;
		cap_args.size = sizeof(union virgl_caps);
	} else {
		cap_args.cap_set_id = 1;
		cap_args.size = sizeof(struct virgl_caps_v1);
	}

	ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_GET_CAPS, &cap_args);
	if (ret) {
		drv_log("DRM_IOCTL_VIRTGPU_GET_CAPS failed with %s\n", strerror(errno));
		*caps_is_v2 = 0;

		// Fall back to v1.
		cap_args.cap_set_id = 1;
		cap_args.size = sizeof(struct virgl_caps_v1);

		ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_GET_CAPS, &cap_args);
		if (ret) {
			drv_log("DRM_IOCTL_VIRTGPU_GET_CAPS failed with %s\n", strerror(errno));
		}
	}

	return ret;
}

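/*
 * Driver init: probes the feature table, queries host capabilities when 3D is
 * available, and registers the supported format/use-flag combinations for
 * either the virgl path or the dumb-buffer (2D) path.
 */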
static int virtio_gpu_init(struct driver *drv)
{
	int ret;
	struct virtio_gpu_priv *priv;

	priv = calloc(1, sizeof(*priv));
	drv->priv = priv;
	for (uint32_t i = 0; i < ARRAY_SIZE(features); i++) {
		struct drm_virtgpu_getparam params = { 0 };

		params.param = features[i].feature;
		params.value = (uint64_t)(uintptr_t)&features[i].enabled;
		ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_GETPARAM, &params);
		if (ret)
			drv_log("DRM_IOCTL_VIRTGPU_GETPARAM failed with %s\n", strerror(errno));
	}

	if (features[feat_3d].enabled) {
		virtio_gpu_get_caps(drv, &priv->caps, &priv->caps_is_v2);

		/* This doesn't mean the host can scan out every format; it just means the
		 * host hypervisor can show it. */
		virtio_gpu_add_combinations(drv, render_target_formats,
					    ARRAY_SIZE(render_target_formats), &LINEAR_METADATA,
					    BO_USE_RENDER_MASK | BO_USE_SCANOUT);
		virtio_gpu_add_combinations(drv, texture_source_formats,
					    ARRAY_SIZE(texture_source_formats), &LINEAR_METADATA,
					    BO_USE_TEXTURE_MASK);
	} else {
		/* The virtio primary plane only allows this format. */
		virtio_gpu_add_combination(drv, DRM_FORMAT_XRGB8888, &LINEAR_METADATA,
					   BO_USE_RENDER_MASK | BO_USE_SCANOUT);
		/* The virtio cursor plane only allows this format, and Chrome cannot live
		 * without a renderable ARGB8888 format. */
		virtio_gpu_add_combination(drv, DRM_FORMAT_ARGB8888, &LINEAR_METADATA,
					   BO_USE_RENDER_MASK | BO_USE_CURSOR);
		/* Android needs more formats, but they can no longer be bound as scanouts
		 * after "drm/virtio: fix DRM_FORMAT_* handling". */
		virtio_gpu_add_combinations(drv, render_target_formats,
					    ARRAY_SIZE(render_target_formats), &LINEAR_METADATA,
					    BO_USE_RENDER_MASK);
		virtio_gpu_add_combinations(drv, dumb_texture_source_formats,
					    ARRAY_SIZE(dumb_texture_source_formats),
					    &LINEAR_METADATA, BO_USE_TEXTURE_MASK);
		virtio_gpu_add_combination(drv, DRM_FORMAT_NV12, &LINEAR_METADATA,
					   BO_USE_SW_MASK | BO_USE_LINEAR);
	}

	/* Android CTS tests require this. */
	virtio_gpu_add_combination(drv, DRM_FORMAT_BGR888, &LINEAR_METADATA, BO_USE_SW_MASK);

	drv_modify_combination(drv, DRM_FORMAT_NV12, &LINEAR_METADATA,
			       BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
				   BO_USE_HW_VIDEO_ENCODER);
	drv_modify_combination(drv, DRM_FORMAT_R8, &LINEAR_METADATA,
			       BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE);

	return drv_modify_linear_combinations(drv);
}

static void virtio_gpu_close(struct driver *drv)
{
	free(drv->priv);
	drv->priv = NULL;
}

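/*
 * The bo_create/bo_destroy/bo_map entry points below simply dispatch to the
 * virgl path when host 3D support was detected, and to the kernel dumb-buffer
 * path otherwise.
 */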
static int virtio_gpu_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
				uint64_t use_flags)
{
	if (features[feat_3d].enabled)
		return virtio_virgl_bo_create(bo, width, height, format, use_flags);
	else
		return virtio_dumb_bo_create(bo, width, height, format, use_flags);
}

static int virtio_gpu_bo_destroy(struct bo *bo)
{
	if (features[feat_3d].enabled)
		return drv_gem_bo_destroy(bo);
	else
		return drv_dumb_bo_destroy(bo);
}

static void *virtio_gpu_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags)
{
	if (features[feat_3d].enabled)
		return virtio_virgl_bo_map(bo, vma, plane, map_flags);
	else
		return drv_dumb_bo_map(bo, vma, plane, map_flags);
}

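/*
 * Invalidate: transfers host-side contents into the guest mapping
 * (TRANSFER_FROM_HOST) and waits for completion, but only for buffers the
 * host may have written (rendering, camera, video decode/encode).
 */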
static int virtio_gpu_bo_invalidate(struct bo *bo, struct mapping *mapping)
{
	int ret;
	struct drm_virtgpu_3d_transfer_from_host xfer;
	struct drm_virtgpu_3d_wait waitcmd;

	if (!features[feat_3d].enabled)
		return 0;

	// Invalidate is only necessary if the host writes to the buffer.
	if ((bo->meta.use_flags & (BO_USE_RENDERING | BO_USE_CAMERA_WRITE |
				   BO_USE_HW_VIDEO_ENCODER | BO_USE_HW_VIDEO_DECODER)) == 0)
		return 0;

	memset(&xfer, 0, sizeof(xfer));
	xfer.bo_handle = mapping->vma->handle;
	xfer.box.x = mapping->rect.x;
	xfer.box.y = mapping->rect.y;
	xfer.box.w = mapping->rect.width;
	xfer.box.h = mapping->rect.height;
	xfer.box.d = 1;

	if ((bo->meta.use_flags & BO_USE_RENDERING) == 0) {
		// Unfortunately, the kernel doesn't actually pass the guest layer_stride and
		// guest stride to the host (compare virtio_gpu.h and virtgpu_drm.h). For gbm
		// based resources, we can work around this by using the level field to pass
		// the stride to virglrenderer's gbm transfer code. However, we need to avoid
		// doing this for resources which don't rely on that transfer code, which is
		// resources with the BO_USE_RENDERING flag set.
		// TODO(b/145993887): Send also stride when the patches are landed
		xfer.level = bo->meta.strides[0];
	}

	ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST, &xfer);
	if (ret) {
		drv_log("DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST failed with %s\n", strerror(errno));
		return -errno;
	}

	// The transfer needs to complete before invalidate returns so that any host changes
	// are visible and to ensure the host doesn't overwrite subsequent guest changes.
	// TODO(b/136733358): Support returning fences from transfers
	memset(&waitcmd, 0, sizeof(waitcmd));
	waitcmd.handle = mapping->vma->handle;
	ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_WAIT, &waitcmd);
	if (ret) {
		drv_log("DRM_IOCTL_VIRTGPU_WAIT failed with %s\n", strerror(errno));
		return -errno;
	}

	return 0;
}

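/*
 * Flush: pushes guest CPU writes to the host copy (TRANSFER_TO_HOST) for
 * writable mappings, waiting for completion only when non-GPU host hardware
 * may consume the buffer.
 */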
static int virtio_gpu_bo_flush(struct bo *bo, struct mapping *mapping)
{
	int ret;
	struct drm_virtgpu_3d_transfer_to_host xfer;
	struct drm_virtgpu_3d_wait waitcmd;

	if (!features[feat_3d].enabled)
		return 0;

	if (!(mapping->vma->map_flags & BO_MAP_WRITE))
		return 0;

	memset(&xfer, 0, sizeof(xfer));
	xfer.bo_handle = mapping->vma->handle;
	xfer.box.x = mapping->rect.x;
	xfer.box.y = mapping->rect.y;
	xfer.box.w = mapping->rect.width;
	xfer.box.h = mapping->rect.height;
	xfer.box.d = 1;

	// Unfortunately, the kernel doesn't actually pass the guest layer_stride and
	// guest stride to the host (compare virtio_gpu.h and virtgpu_drm.h). We can use
	// the level to work around this.
	xfer.level = bo->meta.strides[0];

	ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST, &xfer);
	if (ret) {
		drv_log("DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST failed with %s\n", strerror(errno));
		return -errno;
	}

	// If the buffer is only accessed by the host GPU, then the flush is ordered
	// with subsequent commands. However, if other host hardware can access the
	// buffer, we need to wait for the transfer to complete for consistency.
	// TODO(b/136733358): Support returning fences from transfers
	if (bo->meta.use_flags & BO_USE_NON_GPU_HW) {
		memset(&waitcmd, 0, sizeof(waitcmd));
		waitcmd.handle = mapping->vma->handle;

		ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_WAIT, &waitcmd);
		if (ret) {
			drv_log("DRM_IOCTL_VIRTGPU_WAIT failed with %s\n", strerror(errno));
			return -errno;
		}
	}

	return 0;
}

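/* Resolves Android's flexible formats to concrete fourccs based on use flags. */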
static uint32_t virtio_gpu_resolve_format(struct driver *drv, uint32_t format, uint64_t use_flags)
{
	switch (format) {
	case DRM_FORMAT_FLEX_IMPLEMENTATION_DEFINED:
		/* Camera subsystem requires NV12. */
		if (use_flags & (BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE))
			return DRM_FORMAT_NV12;
		/* HACK: See b/28671744 */
		return DRM_FORMAT_XBGR8888;
	case DRM_FORMAT_FLEX_YCbCr_420_888:
		/*
		 * All of our host drivers prefer NV12 as their flexible media format.
		 * If that changes, this will need to be modified.
		 */
		if (features[feat_3d].enabled)
			return DRM_FORMAT_NV12;
		else
			return DRM_FORMAT_YVU420;
	default:
		return format;
	}
}

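/*
 * Queries host-assigned strides/offsets via DRM_IOCTL_VIRTGPU_RESOURCE_INFO
 * and overrides the guest-computed layout for any plane the kernel reports.
 */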
static int virtio_gpu_resource_info(struct bo *bo, uint32_t strides[DRV_MAX_PLANES],
				    uint32_t offsets[DRV_MAX_PLANES])
{
	int ret;
	struct drm_virtgpu_resource_info res_info;

	if (!features[feat_3d].enabled)
		return 0;

	memset(&res_info, 0, sizeof(res_info));
	res_info.bo_handle = bo->handles[0].u32;
	ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_RESOURCE_INFO, &res_info);
	if (ret) {
		drv_log("DRM_IOCTL_VIRTGPU_RESOURCE_INFO failed with %s\n", strerror(errno));
		return ret;
	}

	for (uint32_t plane = 0; plane < bo->meta.num_planes; plane++) {
		/*
		 * Currently, kernel v4.14 (Betty) doesn't have the extended resource info
		 * ioctl.
		 */
		if (res_info.strides[plane]) {
			strides[plane] = res_info.strides[plane];
			offsets[plane] = res_info.offsets[plane];
		}
	}

	return 0;
}

const struct backend backend_virtio_gpu = {
	.name = "virtio_gpu",
	.init = virtio_gpu_init,
	.close = virtio_gpu_close,
	.bo_create = virtio_gpu_bo_create,
	.bo_destroy = virtio_gpu_bo_destroy,
	.bo_import = drv_prime_bo_import,
	.bo_map = virtio_gpu_bo_map,
	.bo_unmap = drv_bo_munmap,
	.bo_invalidate = virtio_gpu_bo_invalidate,
	.bo_flush = virtio_gpu_bo_flush,
	.resolve_format = virtio_gpu_resolve_format,
	.resource_info = virtio_gpu_resource_info,
};