/*
 * Copyright 2017 The Chromium OS Authors. All rights reserved.
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <virtgpu_drm.h>
#include <xf86drm.h>

#include "drv_priv.h"
#include "helpers.h"
#include "util.h"
#include "virgl_hw.h"

#ifndef PAGE_SIZE
#define PAGE_SIZE 0x1000
#endif
#define PIPE_TEXTURE_2D 2

#define MESA_LLVMPIPE_TILE_ORDER 6
#define MESA_LLVMPIPE_TILE_SIZE (1 << MESA_LLVMPIPE_TILE_ORDER)

static const uint32_t render_target_formats[] = { DRM_FORMAT_ABGR8888, DRM_FORMAT_ARGB8888,
						  DRM_FORMAT_RGB565, DRM_FORMAT_XBGR8888,
						  DRM_FORMAT_XRGB8888 };

static const uint32_t dumb_texture_source_formats[] = { DRM_FORMAT_R8, DRM_FORMAT_YVU420,
							DRM_FORMAT_NV12,
							DRM_FORMAT_YVU420_ANDROID };

static const uint32_t texture_source_formats[] = { DRM_FORMAT_NV12, DRM_FORMAT_R8, DRM_FORMAT_RG88,
						   DRM_FORMAT_YVU420_ANDROID };

struct virtio_gpu_priv {
	int has_3d;
};

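/*
 * Maps a DRM fourcc to the equivalent virgl format enum. Returns 0 for
 * formats this backend cannot translate.
 */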
static uint32_t translate_format(uint32_t drm_fourcc)
{
	switch (drm_fourcc) {
	case DRM_FORMAT_XRGB8888:
		return VIRGL_FORMAT_B8G8R8X8_UNORM;
	case DRM_FORMAT_ARGB8888:
		return VIRGL_FORMAT_B8G8R8A8_UNORM;
	case DRM_FORMAT_XBGR8888:
		return VIRGL_FORMAT_R8G8B8X8_UNORM;
	case DRM_FORMAT_ABGR8888:
		return VIRGL_FORMAT_R8G8B8A8_UNORM;
	case DRM_FORMAT_RGB565:
		return VIRGL_FORMAT_B5G6R5_UNORM;
	case DRM_FORMAT_R8:
		return VIRGL_FORMAT_R8_UNORM;
	case DRM_FORMAT_RG88:
		return VIRGL_FORMAT_R8G8_UNORM;
	case DRM_FORMAT_NV12:
		return VIRGL_FORMAT_NV12;
	case DRM_FORMAT_YVU420:
	case DRM_FORMAT_YVU420_ANDROID:
		return VIRGL_FORMAT_YV12;
	default:
		return 0;
	}
}

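/*
 * Without 3D acceleration the host renders guest buffers in software with
 * Mesa's llvmpipe, so dimensions are padded to its tile size. DRM_FORMAT_R8
 * is exempt, presumably because R8 buffers hold raw bytes rather than
 * renderable images (an assumption; the exemption itself is from the code).
 */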
static int virtio_dumb_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
				 uint64_t use_flags)
{
	if (bo->meta.format != DRM_FORMAT_R8) {
		width = ALIGN(width, MESA_LLVMPIPE_TILE_SIZE);
		height = ALIGN(height, MESA_LLVMPIPE_TILE_SIZE);
	}

	return drv_dumb_bo_create(bo, width, height, format, use_flags);
}

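/*
 * Clears check_flag from *flag and ORs virgl_bind into *bind when the flag
 * is set. Consuming the flag bits lets use_flags_to_bind() detect any use
 * flags that were left untranslated.
 */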
static inline void handle_flag(uint64_t *flag, uint64_t check_flag, uint32_t *bind,
			       uint32_t virgl_bind)
{
	if ((*flag) & check_flag) {
		(*flag) &= ~check_flag;
		(*bind) |= virgl_bind;
	}
}

static uint32_t use_flags_to_bind(uint64_t use_flags)
{
	/* In crosvm, VIRGL_BIND_SHARED means minigbm will allocate, not virglrenderer. */
	uint32_t bind = VIRGL_BIND_SHARED;

	handle_flag(&use_flags, BO_USE_TEXTURE, &bind, VIRGL_BIND_SAMPLER_VIEW);
	handle_flag(&use_flags, BO_USE_RENDERING, &bind, VIRGL_BIND_RENDER_TARGET);
	handle_flag(&use_flags, BO_USE_SCANOUT, &bind, VIRGL_BIND_SCANOUT);
	handle_flag(&use_flags, BO_USE_CURSOR, &bind, VIRGL_BIND_CURSOR);
	handle_flag(&use_flags, BO_USE_LINEAR, &bind, VIRGL_BIND_LINEAR);

	handle_flag(&use_flags, BO_USE_SW_READ_OFTEN, &bind, VIRGL_BIND_LINEAR);
	handle_flag(&use_flags, BO_USE_SW_READ_RARELY, &bind, VIRGL_BIND_LINEAR);
	handle_flag(&use_flags, BO_USE_SW_WRITE_OFTEN, &bind, VIRGL_BIND_LINEAR);
	handle_flag(&use_flags, BO_USE_SW_WRITE_RARELY, &bind, VIRGL_BIND_LINEAR);

	// All host drivers only support linear camera buffer formats. If
	// that changes, this will need to be modified.
	handle_flag(&use_flags, BO_USE_CAMERA_READ, &bind, VIRGL_BIND_LINEAR);
	handle_flag(&use_flags, BO_USE_CAMERA_WRITE, &bind, VIRGL_BIND_LINEAR);

	if (use_flags) {
		drv_log("Unhandled bo use flag: %llx\n", (unsigned long long)use_flags);
	}

	return bind;
}

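/*
 * Allocates a buffer through the virtio-gpu 3D resource-create path:
 * guest-side stride/size metadata is computed first, then the host is asked
 * to create a matching 2D texture resource of at least that size.
 */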
static int virtio_virgl_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
				  uint64_t use_flags)
{
	int ret;
	uint32_t stride;
	struct drm_virtgpu_resource_create res_create;

	stride = drv_stride_from_format(format, width, 0);
	drv_bo_from_format(bo, stride, height, format);

	/*
	 * Setting the target is intended to ensure this resource gets bound as a 2D
	 * texture in the host renderer's GL state. All of these resource properties are
	 * sent unchanged by the kernel to the host, which in turn sends them unchanged to
	 * virglrenderer. When virglrenderer makes a resource, it will convert the target
	 * enum to the equivalent one in GL and then bind the resource to that target.
	 */
	memset(&res_create, 0, sizeof(res_create));

	res_create.target = PIPE_TEXTURE_2D;
	res_create.format = translate_format(format);
	res_create.bind = use_flags_to_bind(use_flags);
	res_create.width = width;
	res_create.height = height;

	/* For virgl 3D */
	res_create.depth = 1;
	res_create.array_size = 1;
	res_create.last_level = 0;
	res_create.nr_samples = 0;

	res_create.size = ALIGN(bo->meta.total_size, PAGE_SIZE);
	ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE, &res_create);
	if (ret) {
		drv_log("DRM_IOCTL_VIRTGPU_RESOURCE_CREATE failed with %s\n", strerror(errno));
		return ret;
	}

	for (uint32_t plane = 0; plane < bo->meta.num_planes; plane++)
		bo->handles[plane].u32 = res_create.bo_handle;

	return 0;
}

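/*
 * DRM_IOCTL_VIRTGPU_MAP hands back an mmap offset for the GEM handle; the
 * actual mapping is then established with mmap() on the DRM fd. All planes
 * share one handle (see virtio_virgl_bo_create), so mapping plane 0 maps the
 * whole buffer.
 */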
static void *virtio_virgl_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags)
{
	int ret;
	struct drm_virtgpu_map gem_map;

	memset(&gem_map, 0, sizeof(gem_map));
	gem_map.handle = bo->handles[0].u32;

	ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_MAP, &gem_map);
	if (ret) {
		drv_log("DRM_IOCTL_VIRTGPU_MAP failed with %s\n", strerror(errno));
		return MAP_FAILED;
	}

	vma->length = bo->meta.total_size;
	return mmap(0, bo->meta.total_size, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd,
		    gem_map.offset);
}

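/*
 * Probes the kernel for host 3D support and advertises format/use-flag
 * combinations accordingly: virgl texture formats when 3D is available,
 * dumb-buffer formats otherwise.
 */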
static int virtio_gpu_init(struct driver *drv)
{
	int ret;
	struct virtio_gpu_priv *priv;
	struct drm_virtgpu_getparam args;

	priv = calloc(1, sizeof(*priv));
	if (!priv)
		return -ENOMEM;

	drv->priv = priv;

	memset(&args, 0, sizeof(args));
	args.param = VIRTGPU_PARAM_3D_FEATURES;
	args.value = (uint64_t)(uintptr_t)&priv->has_3d;
	ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_GETPARAM, &args);
	if (ret) {
		drv_log("virtio 3D acceleration is not available\n");
		/* Be paranoid */
		priv->has_3d = 0;
	}

	/* This doesn't mean the host can scan out everything; it means the
	 * host hypervisor can display it. */
	drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
			     &LINEAR_METADATA, BO_USE_RENDER_MASK | BO_USE_SCANOUT);

	if (priv->has_3d) {
		drv_add_combinations(drv, texture_source_formats,
				     ARRAY_SIZE(texture_source_formats), &LINEAR_METADATA,
				     BO_USE_TEXTURE_MASK);
	} else {
		drv_add_combinations(drv, dumb_texture_source_formats,
				     ARRAY_SIZE(dumb_texture_source_formats), &LINEAR_METADATA,
				     BO_USE_TEXTURE_MASK);
		drv_add_combination(drv, DRM_FORMAT_NV12, &LINEAR_METADATA,
				    BO_USE_SW_MASK | BO_USE_LINEAR);
	}

	/* Android CTS tests require this. */
	drv_add_combination(drv, DRM_FORMAT_BGR888, &LINEAR_METADATA, BO_USE_SW_MASK);

	drv_modify_combination(drv, DRM_FORMAT_NV12, &LINEAR_METADATA,
			       BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
				   BO_USE_HW_VIDEO_ENCODER);
	drv_modify_combination(drv, DRM_FORMAT_R8, &LINEAR_METADATA,
			       BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE);

	return drv_modify_linear_combinations(drv);
}

static void virtio_gpu_close(struct driver *drv)
{
	free(drv->priv);
	drv->priv = NULL;
}

static int virtio_gpu_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
				uint64_t use_flags)
{
	struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)bo->drv->priv;
	if (priv->has_3d)
		return virtio_virgl_bo_create(bo, width, height, format, use_flags);
	else
		return virtio_dumb_bo_create(bo, width, height, format, use_flags);
}

static int virtio_gpu_bo_destroy(struct bo *bo)
{
	struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)bo->drv->priv;
	if (priv->has_3d)
		return drv_gem_bo_destroy(bo);
	else
		return drv_dumb_bo_destroy(bo);
}

static void *virtio_gpu_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags)
{
	struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)bo->drv->priv;
	if (priv->has_3d)
		return virtio_virgl_bo_map(bo, vma, plane, map_flags);
	else
		return drv_dumb_bo_map(bo, vma, plane, map_flags);
}

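/*
 * Called before the guest reads a mapped buffer: pulls host-side contents
 * into the guest with TRANSFER_FROM_HOST, then waits for the transfer to
 * complete so host writes are visible before the guest touches the memory.
 */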
static int virtio_gpu_bo_invalidate(struct bo *bo, struct mapping *mapping)
{
	int ret;
	struct drm_virtgpu_3d_transfer_from_host xfer;
	struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)bo->drv->priv;
	struct drm_virtgpu_3d_wait waitcmd;

	if (!priv->has_3d)
		return 0;

	// Invalidate is only necessary if the host writes to the buffer.
	if ((bo->meta.use_flags & BO_USE_RENDERING) == 0)
		return 0;

	memset(&xfer, 0, sizeof(xfer));
	xfer.bo_handle = mapping->vma->handle;
	xfer.box.x = mapping->rect.x;
	xfer.box.y = mapping->rect.y;
	xfer.box.w = mapping->rect.width;
	xfer.box.h = mapping->rect.height;
	xfer.box.d = 1;

	// Unfortunately, the kernel doesn't actually pass the guest layer_stride and
	// guest stride to the host (compare virtio_gpu.h and virtgpu_drm.h). We can use
	// the level to work around this.
	xfer.level = bo->meta.strides[0];

	ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST, &xfer);
	if (ret) {
		drv_log("DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST failed with %s\n", strerror(errno));
		return -errno;
	}

	// The transfer needs to complete before invalidate returns so that any host changes
	// are visible and to ensure the host doesn't overwrite subsequent guest changes.
	// TODO(b/136733358): Support returning fences from transfers
	memset(&waitcmd, 0, sizeof(waitcmd));
	waitcmd.handle = mapping->vma->handle;
	ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_WAIT, &waitcmd);
	if (ret) {
		drv_log("DRM_IOCTL_VIRTGPU_WAIT failed with %s\n", strerror(errno));
		return -errno;
	}

	return 0;
}

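/*
 * Called after the guest writes a mapped buffer: pushes the dirty rectangle
 * to the host with TRANSFER_TO_HOST. Unlike invalidate, this does not wait
 * for the transfer to complete.
 */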
static int virtio_gpu_bo_flush(struct bo *bo, struct mapping *mapping)
{
	int ret;
	struct drm_virtgpu_3d_transfer_to_host xfer;
	struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)bo->drv->priv;

	if (!priv->has_3d)
		return 0;

	if (!(mapping->vma->map_flags & BO_MAP_WRITE))
		return 0;

	memset(&xfer, 0, sizeof(xfer));
	xfer.bo_handle = mapping->vma->handle;
	xfer.box.x = mapping->rect.x;
	xfer.box.y = mapping->rect.y;
	xfer.box.w = mapping->rect.width;
	xfer.box.h = mapping->rect.height;
	xfer.box.d = 1;

	// Unfortunately, the kernel doesn't actually pass the guest layer_stride and
	// guest stride to the host (compare virtio_gpu.h and virtgpu_drm.h). We can use
	// the level to work around this.
	xfer.level = bo->meta.strides[0];

	ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST, &xfer);
	if (ret) {
		drv_log("DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST failed with %s\n", strerror(errno));
		return -errno;
	}

	return 0;
}

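/*
 * Resolves Android's flexible formats to concrete DRM formats based on the
 * buffer's use flags and whether the host supports 3D.
 */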
static uint32_t virtio_gpu_resolve_format(struct driver *drv, uint32_t format, uint64_t use_flags)
{
	struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)drv->priv;

	switch (format) {
	case DRM_FORMAT_FLEX_IMPLEMENTATION_DEFINED:
		/* Camera subsystem requires NV12. */
		if (use_flags & (BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE))
			return DRM_FORMAT_NV12;
		/* HACK: See b/28671744 */
		return DRM_FORMAT_XBGR8888;
	case DRM_FORMAT_FLEX_YCbCr_420_888:
		/*
		 * All of our host drivers prefer NV12 as their flexible media format.
		 * If that changes, this will need to be modified.
		 */
		if (priv->has_3d)
			return DRM_FORMAT_NV12;
		else
			return DRM_FORMAT_YVU420;
	default:
		return format;
	}
}

const struct backend backend_virtio_gpu = {
	.name = "virtio_gpu",
	.init = virtio_gpu_init,
	.close = virtio_gpu_close,
	.bo_create = virtio_gpu_bo_create,
	.bo_destroy = virtio_gpu_bo_destroy,
	.bo_import = drv_prime_bo_import,
	.bo_map = virtio_gpu_bo_map,
	.bo_unmap = drv_bo_munmap,
	.bo_invalidate = virtio_gpu_bo_invalidate,
	.bo_flush = virtio_gpu_bo_flush,
	.resolve_format = virtio_gpu_resolve_format,
};