/*
 * Copyright 2021 The Chromium OS Authors. All rights reserved.
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include <errno.h>
#include <string.h>
#include <sys/mman.h>
#include <xf86drm.h>

#include "drv_priv.h"
#include "external/virtgpu_cross_domain_protocol.h"
#include "external/virtgpu_drm.h"
#include "helpers.h"
#include "util.h"
#include "virtgpu.h"

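/*
 * CAPSET_CROSS_DOMAIN is the virtio-gpu capability set used by this backend.
 * CAPSET_CROSS_FAKE is an id no host reports; cross_domain_init() requires it
 * so the backend is never selected by accident (see the comment there).
 */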
#define CAPSET_CROSS_DOMAIN 5
#define CAPSET_CROSS_FAKE 30

static const uint32_t scanout_render_formats[] = { DRM_FORMAT_ABGR2101010, DRM_FORMAT_ABGR8888,
						   DRM_FORMAT_ARGB2101010, DRM_FORMAT_ARGB8888,
						   DRM_FORMAT_RGB565, DRM_FORMAT_XBGR2101010,
						   DRM_FORMAT_XBGR8888, DRM_FORMAT_XRGB2101010,
						   DRM_FORMAT_XRGB8888 };

static const uint32_t render_formats[] = { DRM_FORMAT_ABGR16161616F };

static const uint32_t texture_only_formats[] = { DRM_FORMAT_R8, DRM_FORMAT_NV12, DRM_FORMAT_P010,
						 DRM_FORMAT_YVU420, DRM_FORMAT_YVU420_ANDROID };

extern struct virtgpu_param params[];

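/*
 * Per-driver state: the GEM handle and guest mapping of the shared query
 * ring (ring_addr is MAP_FAILED while unmapped), plus a cache of completed
 * metadata queries.
 */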
struct cross_domain_private {
	uint32_t ring_handle;
	void *ring_addr;
	struct drv_array *metadata_cache;
};

static void cross_domain_release_private(struct driver *drv)
{
	int ret;
	struct cross_domain_private *priv = drv->priv;
	struct drm_gem_close gem_close = { 0 };

	if (priv->ring_addr != MAP_FAILED)
		munmap(priv->ring_addr, PAGE_SIZE);

	if (priv->ring_handle) {
		gem_close.handle = priv->ring_handle;

		ret = drmIoctl(drv->fd, DRM_IOCTL_GEM_CLOSE, &gem_close);
		if (ret) {
			drv_log("DRM_IOCTL_GEM_CLOSE failed (handle=%x) error %d\n",
				priv->ring_handle, ret);
		}
	}

	/* The cache may not exist yet if init failed partway through. */
	if (priv->metadata_cache)
		drv_array_destroy(priv->metadata_cache);
	free(priv);
}

static void add_combinations(struct driver *drv)
{
	struct format_metadata metadata;

	// Linear metadata is always supported.
	metadata.tiling = 0;
	metadata.priority = 1;
	metadata.modifier = DRM_FORMAT_MOD_LINEAR;

	drv_add_combinations(drv, scanout_render_formats, ARRAY_SIZE(scanout_render_formats),
			     &metadata, BO_USE_RENDER_MASK | BO_USE_SCANOUT);

	drv_add_combinations(drv, render_formats, ARRAY_SIZE(render_formats), &metadata,
			     BO_USE_RENDER_MASK);

	drv_add_combinations(drv, texture_only_formats, ARRAY_SIZE(texture_only_formats),
			     &metadata, BO_USE_TEXTURE_MASK);

	/* Android CTS tests require this. */
	drv_add_combination(drv, DRM_FORMAT_BGR888, &metadata, BO_USE_SW_MASK);

	drv_modify_combination(drv, DRM_FORMAT_YVU420, &metadata, BO_USE_HW_VIDEO_ENCODER);
	drv_modify_combination(drv, DRM_FORMAT_NV12, &metadata,
			       BO_USE_HW_VIDEO_DECODER | BO_USE_SCANOUT | BO_USE_HW_VIDEO_ENCODER);

	/*
	 * The R8 format backs Android's HAL_PIXEL_FORMAT_BLOB, which is used for JPEG
	 * snapshots from the camera and for input/output buffers of the hardware
	 * decoder/encoder.
	 */
	drv_modify_combination(drv, DRM_FORMAT_R8, &metadata,
			       BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
				   BO_USE_HW_VIDEO_ENCODER);

	drv_modify_linear_combinations(drv);
}

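/*
 * Submits a cross-domain command to the host. When @wait is set, the query
 * ring is attached to the submission, so the DRM_IOCTL_VIRTGPU_WAIT below
 * blocks until the host has written its response into the ring.
 */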
static int cross_domain_submit_cmd(struct driver *drv, uint32_t *cmd, uint32_t cmd_size, bool wait)
{
	int ret;
	struct drm_virtgpu_3d_wait wait_3d = { 0 };
	struct drm_virtgpu_execbuffer exec = { 0 };
	struct cross_domain_private *priv = drv->priv;

	exec.command = (uint64_t)&cmd[0];
	exec.size = cmd_size;
	if (wait) {
		exec.flags = VIRTGPU_EXECBUF_RING_IDX;
		exec.bo_handles = (uint64_t)&priv->ring_handle;
		exec.num_bo_handles = 1;
	}

	ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &exec);
	if (ret < 0) {
		drv_log("DRM_IOCTL_VIRTGPU_EXECBUFFER failed with %s\n", strerror(errno));
		return -EINVAL;
	}

	ret = -EAGAIN;
	while (ret == -EAGAIN) {
		wait_3d.handle = priv->ring_handle;
		ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_WAIT, &wait_3d);
	}

	if (ret < 0) {
		drv_log("DRM_IOCTL_VIRTGPU_WAIT failed with %s\n", strerror(errno));
		return ret;
	}

	return 0;
}

static bool metadata_equal(struct bo_metadata *current, struct bo_metadata *cached)
{
	if ((current->width == cached->width) && (current->height == cached->height) &&
	    (current->format == cached->format) && (current->use_flags == cached->use_flags))
		return true;
	return false;
}

static int cross_domain_metadata_query(struct driver *drv, struct bo_metadata *metadata)
{
	int ret = 0;
	struct bo_metadata *cached_data = NULL;
	struct cross_domain_private *priv = drv->priv;
	struct CrossDomainGetImageRequirements cmd_get_reqs;
	uint32_t *addr = (uint32_t *)priv->ring_addr;
	uint32_t plane, remaining_size;

	memset(&cmd_get_reqs, 0, sizeof(cmd_get_reqs));
	pthread_mutex_lock(&drv->driver_lock);
	for (uint32_t i = 0; i < drv_array_size(priv->metadata_cache); i++) {
		cached_data = (struct bo_metadata *)drv_array_at_idx(priv->metadata_cache, i);
		if (!metadata_equal(metadata, cached_data))
			continue;

		memcpy(metadata, cached_data, sizeof(*cached_data));
		goto out_unlock;
	}

	cmd_get_reqs.hdr.cmd = CROSS_DOMAIN_CMD_GET_IMAGE_REQUIREMENTS;
	cmd_get_reqs.hdr.cmd_size = sizeof(struct CrossDomainGetImageRequirements);

	cmd_get_reqs.width = metadata->width;
	cmd_get_reqs.height = metadata->height;
	cmd_get_reqs.drm_format =
	    (metadata->format == DRM_FORMAT_YVU420_ANDROID) ? DRM_FORMAT_YVU420 : metadata->format;
	cmd_get_reqs.flags = metadata->use_flags;

	/*
	 * It is possible to avoid blocking other bo_create() calls by unlocking before
	 * cross_domain_submit_cmd() and re-locking afterwards. However, that would require
	 * another scan of the metadata cache before drv_array_append in case two bo_create()
	 * calls do the same metadata query. Until cross_domain functionality is more widely
	 * tested, leave this optimization out for now.
	 */
	ret = cross_domain_submit_cmd(drv, (uint32_t *)&cmd_get_reqs, cmd_get_reqs.hdr.cmd_size,
				      true);
	if (ret < 0)
		goto out_unlock;

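	/*
	 * Response layout in the ring, as 32-bit words:
	 *   [0..3]  strides            [4..7]   offsets
	 *   [8..9]  format_modifier    [10..11] total_size (both u64)
	 *   [12] blob_id  [13] map_info  [14] memory_idx  [15] physical_device_idx
	 */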
	memcpy(&metadata->strides, &addr[0], 4 * sizeof(uint32_t));
	memcpy(&metadata->offsets, &addr[4], 4 * sizeof(uint32_t));
	memcpy(&metadata->format_modifier, &addr[8], sizeof(uint64_t));
	memcpy(&metadata->total_size, &addr[10], sizeof(uint64_t));
	memcpy(&metadata->blob_id, &addr[12], sizeof(uint32_t));

	metadata->map_info = addr[13];
	metadata->memory_idx = addr[14];
	metadata->physical_device_idx = addr[15];

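	/*
	 * Derive per-plane sizes: each earlier plane's size is taken from the next
	 * plane's offset, and the final plane receives whatever remains of total_size.
	 */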
	remaining_size = metadata->total_size;
	for (plane = 0; plane < metadata->num_planes; plane++) {
		if (plane != 0) {
			metadata->sizes[plane - 1] = metadata->offsets[plane];
			remaining_size -= metadata->offsets[plane];
		}
	}

	metadata->sizes[plane - 1] = remaining_size;
	drv_array_append(priv->metadata_cache, metadata);

out_unlock:
	pthread_mutex_unlock(&drv->driver_lock);
	return ret;
}

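/*
 * Checks that the host supports the cross-domain capset and the virtgpu
 * features this backend relies on, creates a cross-domain context with one
 * ring for metadata queries, then allocates and maps the shared query ring
 * before announcing it to the host.
 */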
static int cross_domain_init(struct driver *drv)
{
	int ret;
	struct cross_domain_private *priv;
	struct drm_virtgpu_map map = { 0 };
	struct drm_virtgpu_get_caps args = { 0 };
	struct drm_virtgpu_context_init init = { 0 };
	struct drm_virtgpu_resource_create_blob drm_rc_blob = { 0 };
	struct drm_virtgpu_context_set_param ctx_set_params[2] = { { 0 } };

	struct CrossDomainInit cmd_init;
	struct CrossDomainCapabilities cross_domain_caps;

	memset(&cmd_init, 0, sizeof(cmd_init));
	if (!params[param_context_init].value)
		return -ENOTSUP;

	if ((params[param_supported_capset_ids].value & (1 << CAPSET_CROSS_DOMAIN)) == 0)
		return -ENOTSUP;

	if (!params[param_resource_blob].value)
		return -ENOTSUP;

	// Zero-copy memory is required.
	if (!params[param_host_visible].value && !params[param_create_guest_handle].value)
		return -ENOTSUP;

	/*
	 * crosvm never reports the fake capset. This is just an extra check to make sure we
	 * don't use the cross-domain context by accident. Developers may remove this for
	 * testing purposes.
	 */
	if ((params[param_supported_capset_ids].value & (1 << CAPSET_CROSS_FAKE)) == 0)
		return -ENOTSUP;

	priv = calloc(1, sizeof(*priv));
	if (!priv)
		return -ENOMEM;

	/*
	 * Publish the private struct (with the ring marked unmapped) before any error path
	 * that jumps to free_private, since cross_domain_release_private() reads drv->priv.
	 */
	priv->ring_addr = MAP_FAILED;
	drv->priv = priv;

	priv->metadata_cache = drv_array_init(sizeof(struct bo_metadata));
	if (!priv->metadata_cache) {
		ret = -ENOMEM;
		goto free_private;
	}

	args.cap_set_id = CAPSET_CROSS_DOMAIN;
	args.size = sizeof(struct CrossDomainCapabilities);
	args.addr = (unsigned long long)&cross_domain_caps;

	ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_GET_CAPS, &args);
	if (ret) {
		drv_log("DRM_IOCTL_VIRTGPU_GET_CAPS failed with %s\n", strerror(errno));
		goto free_private;
	}

	// When 3D features are available but the host does not support external memory, fall
	// back to the virgl minigbm backend. This typically means the guest-side minigbm
	// resource will be backed by a host OpenGL texture.
	if (!cross_domain_caps.supports_external_gpu_memory && params[param_3d].value) {
		ret = -ENOTSUP;
		goto free_private;
	}

	// Initialize the cross-domain context. Create one fence context to wait for metadata
	// queries.
	ctx_set_params[0].param = VIRTGPU_CONTEXT_PARAM_CAPSET_ID;
	ctx_set_params[0].value = CAPSET_CROSS_DOMAIN;
	ctx_set_params[1].param = VIRTGPU_CONTEXT_PARAM_NUM_RINGS;
	ctx_set_params[1].value = 1;

	init.ctx_set_params = (unsigned long long)&ctx_set_params[0];
	init.num_params = 2;
	ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_CONTEXT_INIT, &init);
	if (ret) {
		drv_log("DRM_IOCTL_VIRTGPU_CONTEXT_INIT failed with %s\n", strerror(errno));
		goto free_private;
	}

	// Create a shared ring buffer for reading back metadata query responses.
	drm_rc_blob.size = PAGE_SIZE;
	drm_rc_blob.blob_mem = VIRTGPU_BLOB_MEM_GUEST;
	drm_rc_blob.blob_flags = VIRTGPU_BLOB_FLAG_USE_MAPPABLE;

	ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB, &drm_rc_blob);
	if (ret < 0) {
		drv_log("DRM_VIRTGPU_RESOURCE_CREATE_BLOB failed with %s\n", strerror(errno));
		goto free_private;
	}

	priv->ring_handle = drm_rc_blob.bo_handle;

	// Map shared ring buffer.
	map.handle = priv->ring_handle;
	ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_MAP, &map);
	if (ret < 0) {
		drv_log("DRM_IOCTL_VIRTGPU_MAP failed with %s\n", strerror(errno));
		goto free_private;
	}

	priv->ring_addr =
	    mmap(0, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, drv->fd, map.offset);

	if (priv->ring_addr == MAP_FAILED) {
		drv_log("mmap failed with %s\n", strerror(errno));
		goto free_private;
	}

	// Notify the host about the ring buffer.
	cmd_init.hdr.cmd = CROSS_DOMAIN_CMD_INIT;
	cmd_init.hdr.cmd_size = sizeof(struct CrossDomainInit);
	cmd_init.ring_id = drm_rc_blob.res_handle;
	ret = cross_domain_submit_cmd(drv, (uint32_t *)&cmd_init, cmd_init.hdr.cmd_size, false);
	if (ret < 0)
		goto free_private;

	// minigbm bookkeeping
	add_combinations(drv);
	return 0;

free_private:
	cross_domain_release_private(drv);
	return ret;
}

static void cross_domain_close(struct driver *drv)
{
	cross_domain_release_private(drv);
}

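/*
 * Queries the host for image requirements (strides, offsets, total size,
 * blob id), then creates a blob resource for the buffer; all planes share
 * the resulting GEM handle.
 */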
static int cross_domain_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
				  uint64_t use_flags)
{
	int ret;
	uint32_t blob_flags = VIRTGPU_BLOB_FLAG_USE_SHAREABLE;
	struct drm_virtgpu_resource_create_blob drm_rc_blob = { 0 };

	ret = cross_domain_metadata_query(bo->drv, &bo->meta);
	if (ret < 0) {
		drv_log("Metadata query failed\n");
		return ret;
	}

	if (use_flags & BO_USE_SW_MASK)
		blob_flags |= VIRTGPU_BLOB_FLAG_USE_MAPPABLE;

	if (params[param_cross_device].value && (use_flags & BO_USE_NON_GPU_HW))
		blob_flags |= VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE;

	/*
	 * It may be possible to have host3d blobs and guest memory handles at the same time,
	 * but the immediate use cases only need one or the other. Prefer guest memory when it
	 * is available: it requires an explicit opt-in (the --udmabuf flag to crosvm), so
	 * developers who enabled it are likely testing it.
	 */
	if (params[param_create_guest_handle].value) {
		drm_rc_blob.blob_mem = VIRTGPU_BLOB_MEM_GUEST;
		blob_flags |= VIRTGPU_BLOB_FLAG_CREATE_GUEST_HANDLE;
	} else if (params[param_host_visible].value) {
		drm_rc_blob.blob_mem = VIRTGPU_BLOB_MEM_HOST3D;
	}

	drm_rc_blob.size = bo->meta.total_size;
	drm_rc_blob.blob_flags = blob_flags;
	drm_rc_blob.blob_id = (uint64_t)bo->meta.blob_id;

	ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB, &drm_rc_blob);
	if (ret < 0) {
		drv_log("DRM_VIRTGPU_RESOURCE_CREATE_BLOB failed with %s\n", strerror(errno));
		return -errno;
	}

	for (uint32_t plane = 0; plane < bo->meta.num_planes; plane++)
		bo->handles[plane].u32 = drm_rc_blob.bo_handle;

	return 0;
}

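/*
 * Since every plane shares one GEM handle (see cross_domain_bo_create()),
 * mapping handles[0] maps the entire buffer.
 */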
static void *cross_domain_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags)
{
	int ret;
	struct drm_virtgpu_map gem_map = { 0 };

	gem_map.handle = bo->handles[0].u32;
	ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_MAP, &gem_map);
	if (ret) {
		drv_log("DRM_IOCTL_VIRTGPU_MAP failed with %s\n", strerror(errno));
		return MAP_FAILED;
	}

	vma->length = bo->meta.total_size;
	return mmap(0, bo->meta.total_size, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd,
		    gem_map.offset);
}

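/*
 * Backend vtable wired into the generic virtgpu driver. Import, destroy, and
 * unmap use the shared drv_* helpers; allocation, mapping, and initialization
 * are cross-domain specific.
 */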
const struct backend virtgpu_cross_domain = {
	.name = "virtgpu_cross_domain",
	.init = cross_domain_init,
	.close = cross_domain_close,
	.bo_create = cross_domain_bo_create,
	.bo_import = drv_prime_bo_import,
	.bo_destroy = drv_gem_bo_destroy,
	.bo_map = cross_domain_bo_map,
	.bo_unmap = drv_bo_munmap,
	.resolve_format = drv_resolve_format_helper,
	.resolve_use_flags = drv_resolve_use_flags_helper,
};