/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie <airlied@redhat.com>
 *    Gerd Hoffmann <kraxel@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drmP.h>
#include "virtgpu_drv.h"
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>

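/*
 * Commands up to MAX_INLINE_CMD_SIZE bytes and responses up to
 * MAX_INLINE_RESP_SIZE bytes live inline in the vbuffer allocation;
 * anything bigger (display info, capsets) needs a separately allocated
 * response buffer passed in by the caller.
 */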
#define MAX_INLINE_CMD_SIZE   96
#define MAX_INLINE_RESP_SIZE  24
#define VBUFFER_SIZE          (sizeof(struct virtio_gpu_vbuffer) \
			       + MAX_INLINE_CMD_SIZE		  \
			       + MAX_INLINE_RESP_SIZE)

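/*
 * Resource ids are handed out from a driver-private IDR, so every
 * host-visible resource gets a unique non-zero handle.  The spinlock
 * guards the IDR; idr_preload() keeps the GFP_NOWAIT allocation from
 * failing under memory pressure.
 */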
void virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
				uint32_t *resid)
{
	int handle;

	idr_preload(GFP_KERNEL);
	spin_lock(&vgdev->resource_idr_lock);
	handle = idr_alloc(&vgdev->resource_idr, NULL, 1, 0, GFP_NOWAIT);
	spin_unlock(&vgdev->resource_idr_lock);
	idr_preload_end();
	*resid = handle;
}

void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
{
	spin_lock(&vgdev->resource_idr_lock);
	idr_remove(&vgdev->resource_idr, id);
	spin_unlock(&vgdev->resource_idr_lock);
}

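/*
 * These two functions are registered as the virtqueue callbacks when
 * the queues are set up, so they run from the virtio interrupt path.
 * They only kick the matching dequeue work; the actual completion
 * handling happens in process context below.
 */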
void virtio_gpu_ctrl_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	schedule_work(&vgdev->ctrlq.dequeue_work);
}

void virtio_gpu_cursor_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	schedule_work(&vgdev->cursorq.dequeue_work);
}

int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
{
	vgdev->vbufs = kmem_cache_create("virtio-gpu-vbufs",
					 VBUFFER_SIZE,
					 __alignof__(struct virtio_gpu_vbuffer),
					 0, NULL);
	if (!vgdev->vbufs)
		return -ENOMEM;
	return 0;
}

void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
{
	kmem_cache_destroy(vgdev->vbufs);
	vgdev->vbufs = NULL;
}

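/*
 * Each vbuffer is a single slab allocation: the struct itself, followed
 * by MAX_INLINE_CMD_SIZE bytes for the command and MAX_INLINE_RESP_SIZE
 * bytes for the response.  Responses larger than the inline area must
 * be passed in via resp_buf and are kfree'd on reclaim.
 */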
static struct virtio_gpu_vbuffer*
virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
		    int size, int resp_size, void *resp_buf,
		    virtio_gpu_resp_cb resp_cb)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = kmem_cache_alloc(vgdev->vbufs, GFP_KERNEL);
	if (!vbuf)
		return ERR_PTR(-ENOMEM);
	memset(vbuf, 0, VBUFFER_SIZE);

	BUG_ON(size > MAX_INLINE_CMD_SIZE);
	vbuf->buf = (void *)vbuf + sizeof(*vbuf);
	vbuf->size = size;

	vbuf->resp_cb = resp_cb;
	vbuf->resp_size = resp_size;
	if (resp_size <= MAX_INLINE_RESP_SIZE)
		vbuf->resp_buf = (void *)vbuf->buf + size;
	else
		vbuf->resp_buf = resp_buf;
	BUG_ON(!vbuf->resp_buf);
	return vbuf;
}

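/*
 * The three helpers below wrap virtio_gpu_get_vbuf() for the common
 * cases: a control command expecting only the generic ctrl_hdr ack, a
 * cursor command with no response at all, and a command with a
 * caller-supplied response buffer plus completion callback.
 */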
static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_vbuffer **vbuffer_p,
				  int size)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf(vgdev, size,
				   sizeof(struct virtio_gpu_ctrl_hdr),
				   NULL, NULL);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return vbuf->buf;
}

static struct virtio_gpu_update_cursor*
virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
			struct virtio_gpu_vbuffer **vbuffer_p)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf
		(vgdev, sizeof(struct virtio_gpu_update_cursor),
		 0, NULL, NULL);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_update_cursor *)vbuf->buf;
}

static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
				       virtio_gpu_resp_cb cb,
				       struct virtio_gpu_vbuffer **vbuffer_p,
				       int cmd_size, int resp_size,
				       void *resp_buf)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
				   resp_size, resp_buf, cb);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return vbuf->buf;
}

static void free_vbuf(struct virtio_gpu_device *vgdev,
		      struct virtio_gpu_vbuffer *vbuf)
{
	if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
		kfree(vbuf->resp_buf);
	kfree(vbuf->data_buf);
	kmem_cache_free(vgdev->vbufs, vbuf);
}

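/*
 * Runs with the queue lock held: pull every completed buffer off the
 * virtqueue and park it on a private list so the response callbacks can
 * run after the lock has been dropped.
 */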
static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
{
	struct virtio_gpu_vbuffer *vbuf;
	unsigned int len;
	int freed = 0;

	while ((vbuf = virtqueue_get_buf(vq, &len))) {
		list_add_tail(&vbuf->list, reclaim_list);
		freed++;
	}
	if (freed == 0)
		DRM_DEBUG("Huh? zero vbufs reclaimed\n");
}

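/*
 * Work handler for the control queue.  The disable_cb/enable_cb loop
 * drains the queue without losing a notification that races with the
 * final enable.  Fence ids are expected to complete in order; the
 * highest one seen is handed to virtio_gpu_fence_event_process() after
 * the buffers have been returned and waiters woken.
 */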
void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     ctrlq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;
	struct virtio_gpu_ctrl_hdr *resp;
	u64 fence_id = 0;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->ctrlq.qlock);
	do {
		virtqueue_disable_cb(vgdev->ctrlq.vq);
		reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);
	} while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
	spin_unlock(&vgdev->ctrlq.qlock);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
		if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA))
			DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
		if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
			u64 f = le64_to_cpu(resp->fence_id);

			if (fence_id > f) {
				DRM_ERROR("%s: Oops: fence %llx -> %llx\n",
					  __func__, fence_id, f);
			} else {
				fence_id = f;
			}
		}
		if (entry->resp_cb)
			entry->resp_cb(vgdev, entry);

		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
	wake_up(&vgdev->ctrlq.ack_queue);

	if (fence_id)
		virtio_gpu_fence_event_process(vgdev, fence_id);
}

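/*
 * Work handler for the cursor queue.  Cursor commands carry no response
 * payload, so completion is just freeing the buffers and waking anyone
 * waiting for ring space.
 */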
void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     cursorq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->cursorq.qlock);
	do {
		virtqueue_disable_cb(vgdev->cursorq.vq);
		reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list);
	} while (!virtqueue_enable_cb(vgdev->cursorq.vq));
	spin_unlock(&vgdev->cursorq.qlock);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
	wake_up(&vgdev->cursorq.ack_queue);
}

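/*
 * Add one command to the control virtqueue: up to three scatterlist
 * entries (command, optional data payload, optional response).  Called
 * with ctrlq.qlock held; on -ENOSPC the lock is dropped while waiting
 * for the host to free descriptors, which is what the sparse
 * __releases/__acquires annotations document.
 */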
static int virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf)
	__releases(&vgdev->ctrlq.qlock)
	__acquires(&vgdev->ctrlq.qlock)
{
	struct virtqueue *vq = vgdev->ctrlq.vq;
	struct scatterlist *sgs[3], vcmd, vout, vresp;
	int outcnt = 0, incnt = 0;
	int ret;

	if (!vgdev->vqs_ready)
		return -ENODEV;

	sg_init_one(&vcmd, vbuf->buf, vbuf->size);
	sgs[outcnt + incnt] = &vcmd;
	outcnt++;

	if (vbuf->data_size) {
		sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
		sgs[outcnt + incnt] = &vout;
		outcnt++;
	}

	if (vbuf->resp_size) {
		sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
		sgs[outcnt + incnt] = &vresp;
		incnt++;
	}

retry:
	ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
	if (ret == -ENOSPC) {
		spin_unlock(&vgdev->ctrlq.qlock);
		wait_event(vgdev->ctrlq.ack_queue, vq->num_free);
		spin_lock(&vgdev->ctrlq.qlock);
		goto retry;
	} else {
		virtqueue_kick(vq);
	}

	if (!ret)
		ret = vq->num_free;
	return ret;
}

static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
					struct virtio_gpu_vbuffer *vbuf)
{
	int rc;

	spin_lock(&vgdev->ctrlq.qlock);
	rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
	spin_unlock(&vgdev->ctrlq.qlock);
	return rc;
}

static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf,
					       struct virtio_gpu_ctrl_hdr *hdr,
					       struct virtio_gpu_fence **fence)
{
	struct virtqueue *vq = vgdev->ctrlq.vq;
	int rc;

again:
	spin_lock(&vgdev->ctrlq.qlock);

	/*
	 * Make sure we have enough space in the virtqueue.  If not,
	 * wait here until we have.
	 *
	 * Without that virtio_gpu_queue_ctrl_buffer_locked might have
	 * to wait for free space, which can result in fence ids being
	 * submitted out-of-order.
	 */
	if (vq->num_free < 3) {
		spin_unlock(&vgdev->ctrlq.qlock);
		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= 3);
		goto again;
	}

	if (fence)
		virtio_gpu_fence_emit(vgdev, hdr, fence);
	rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
	spin_unlock(&vgdev->ctrlq.qlock);
	return rc;
}

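/*
 * Cursor commands go through their own virtqueue, so a busy control
 * queue cannot stall pointer updates.  The flow mirrors the control
 * path: a single out sg, retry on -ENOSPC with the lock dropped.
 */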
static int virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
				   struct virtio_gpu_vbuffer *vbuf)
{
	struct virtqueue *vq = vgdev->cursorq.vq;
	struct scatterlist *sgs[1], ccmd;
	int ret;
	int outcnt;

	if (!vgdev->vqs_ready)
		return -ENODEV;

	sg_init_one(&ccmd, vbuf->buf, vbuf->size);
	sgs[0] = &ccmd;
	outcnt = 1;

	spin_lock(&vgdev->cursorq.qlock);
retry:
	ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
	if (ret == -ENOSPC) {
		spin_unlock(&vgdev->cursorq.qlock);
		wait_event(vgdev->cursorq.ack_queue, vq->num_free);
		spin_lock(&vgdev->cursorq.qlock);
		goto retry;
	} else {
		virtqueue_kick(vq);
	}

	spin_unlock(&vgdev->cursorq.qlock);

	if (!ret)
		ret = vq->num_free;
	return ret;
}

/* Just create gem objects for userspace and long-lived objects,
   and use dma_alloc'ed pages for the queue objects? */

/* create a basic resource */
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
				    uint32_t resource_id,
				    uint32_t format,
				    uint32_t width,
				    uint32_t height)
{
	struct virtio_gpu_resource_create_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->format = cpu_to_le32(format);
	cmd_p->width = cpu_to_le32(width);
	cmd_p->height = cpu_to_le32(height);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
				   uint32_t resource_id)
{
	struct virtio_gpu_resource_unref *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
	cmd_p->resource_id = cpu_to_le32(resource_id);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
					   uint32_t resource_id)
{
	struct virtio_gpu_resource_detach_backing *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
	cmd_p->resource_id = cpu_to_le32(resource_id);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
				uint32_t scanout_id, uint32_t resource_id,
				uint32_t width, uint32_t height,
				uint32_t x, uint32_t y)
{
	struct virtio_gpu_set_scanout *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->scanout_id = cpu_to_le32(scanout_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
				   uint32_t resource_id,
				   uint32_t x, uint32_t y,
				   uint32_t width, uint32_t height)
{
	struct virtio_gpu_resource_flush *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
					uint32_t resource_id, uint64_t offset,
					__le32 width, __le32 height,
					__le32 x, __le32 y,
					struct virtio_gpu_fence **fence)
{
	struct virtio_gpu_transfer_to_host_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->r.width = width;
	cmd_p->r.height = height;
	cmd_p->r.x = x;
	cmd_p->r.y = y;

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

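/*
 * The mem-entry array travels as the command's data payload
 * (vbuf->data_buf) and is kfree'd by free_vbuf() when the command
 * completes, so the caller hands ownership of ents to the ring.
 */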
static void
virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
				       uint32_t resource_id,
				       struct virtio_gpu_mem_entry *ents,
				       uint32_t nents,
				       struct virtio_gpu_fence **fence)
{
	struct virtio_gpu_resource_attach_backing *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->nr_entries = cpu_to_le32(nents);

	vbuf->data_buf = ents;
	vbuf->data_size = sizeof(*ents) * nents;

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

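/*
 * Completion callback for GET_DISPLAY_INFO: cache the per-scanout modes
 * under display_info_lock, wake anyone waiting in resp_wq, and kick the
 * KMS hotplug machinery so connector state gets re-probed.
 */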
static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_resp_display_info *resp =
		(struct virtio_gpu_resp_display_info *)vbuf->resp_buf;
	int i;

	spin_lock(&vgdev->display_info_lock);
	for (i = 0; i < vgdev->num_scanouts; i++) {
		vgdev->outputs[i].info = resp->pmodes[i];
		if (resp->pmodes[i].enabled) {
			DRM_DEBUG("output %d: %dx%d+%d+%d\n", i,
				  le32_to_cpu(resp->pmodes[i].r.width),
				  le32_to_cpu(resp->pmodes[i].r.height),
				  le32_to_cpu(resp->pmodes[i].r.x),
				  le32_to_cpu(resp->pmodes[i].r.y));
		} else {
			DRM_DEBUG("output %d: disabled\n", i);
		}
	}

	vgdev->display_info_pending = false;
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);

	if (!drm_helper_hpd_irq_event(vgdev->ddev))
		drm_kms_helper_hotplug_event(vgdev->ddev);
}

static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
					      struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_get_capset_info *cmd =
		(struct virtio_gpu_get_capset_info *)vbuf->buf;
	struct virtio_gpu_resp_capset_info *resp =
		(struct virtio_gpu_resp_capset_info *)vbuf->resp_buf;
	int i = le32_to_cpu(cmd->capset_index);

	spin_lock(&vgdev->display_info_lock);
	vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
	vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
	vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);
}

static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
				     struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_get_capset *cmd =
		(struct virtio_gpu_get_capset *)vbuf->buf;
	struct virtio_gpu_resp_capset *resp =
		(struct virtio_gpu_resp_capset *)vbuf->resp_buf;
	struct virtio_gpu_drv_cap_cache *cache_ent;

	spin_lock(&vgdev->display_info_lock);
	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
		if (cache_ent->version == le32_to_cpu(cmd->capset_version) &&
		    cache_ent->id == le32_to_cpu(cmd->capset_id)) {
			memcpy(cache_ent->caps_cache, resp->capset_data,
			       cache_ent->size);
			atomic_set(&cache_ent->is_valid, 1);
			break;
		}
	}
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);
}

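/*
 * Query display info.  The reply does not fit the inline response area,
 * so a response buffer is kzalloc'ed here, handed to the vbuffer, and
 * kfree'd by free_vbuf() after the callback above has run.
 */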
int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_ctrl_hdr *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_display_info),
			   GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_get_display_info_cb, &vbuf,
		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_display_info),
		 resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	vgdev->display_info_pending = true;
	cmd_p->type = cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx)
{
	struct virtio_gpu_get_capset_info *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset_info),
			   GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_get_capset_info_cb, &vbuf,
		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_capset_info),
		 resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET_INFO);
	cmd_p->capset_index = cpu_to_le32(idx);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

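/*
 * Fetch the contents of one capability set.  A cache entry sized for
 * the max_size reported by GET_CAPSET_INFO is linked into cap_cache
 * before the command is queued; virtio_gpu_cmd_capset_cb() fills it in
 * and sets is_valid, and callers wait on resp_wq for that to happen.
 */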
int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
			      int idx, int version,
			      struct virtio_gpu_drv_cap_cache **cache_p)
{
	struct virtio_gpu_get_capset *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	int max_size;
	struct virtio_gpu_drv_cap_cache *cache_ent;
	void *resp_buf;

	if (idx >= vgdev->num_capsets)
		return -EINVAL;

	if (version > vgdev->capsets[idx].max_version)
		return -EINVAL;

	cache_ent = kzalloc(sizeof(*cache_ent), GFP_KERNEL);
	if (!cache_ent)
		return -ENOMEM;

	max_size = vgdev->capsets[idx].max_size;
	cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
	if (!cache_ent->caps_cache) {
		kfree(cache_ent);
		return -ENOMEM;
	}

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset) + max_size,
			   GFP_KERNEL);
	if (!resp_buf) {
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
		return -ENOMEM;
	}

	cache_ent->version = version;
	cache_ent->id = vgdev->capsets[idx].id;
	atomic_set(&cache_ent->is_valid, 0);
	cache_ent->size = max_size;
	spin_lock(&vgdev->display_info_lock);
	list_add_tail(&cache_ent->head, &vgdev->cap_cache);
	spin_unlock(&vgdev->display_info_lock);

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p),
		 sizeof(struct virtio_gpu_resp_capset) + max_size,
		 resp_buf);
	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET);
	cmd_p->capset_id = cpu_to_le32(vgdev->capsets[idx].id);
	cmd_p->capset_version = cpu_to_le32(version);
	*cache_p = cache_ent;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);

	return 0;
}

void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
				   uint32_t nlen, const char *name)
{
	struct virtio_gpu_ctx_create *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	cmd_p->nlen = cpu_to_le32(nlen);
	strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name) - 1);
	cmd_p->debug_name[sizeof(cmd_p->debug_name) - 1] = 0;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
				    uint32_t id)
{
	struct virtio_gpu_ctx_destroy *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DESTROY);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    uint32_t resource_id)
{
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    uint32_t resource_id)
{
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_resource_create_3d *rc_3d,
				  struct virtio_gpu_fence **fence)
{
	struct virtio_gpu_resource_create_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	*cmd_p = *rc_3d;
	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
	cmd_p->hdr.flags = 0;

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
					uint32_t resource_id, uint32_t ctx_id,
					uint64_t offset, uint32_t level,
					struct virtio_gpu_box *box,
					struct virtio_gpu_fence **fence)
{
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->box = *box;
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
					  uint32_t resource_id, uint32_t ctx_id,
					  uint64_t offset, uint32_t level,
					  struct virtio_gpu_box *box,
					  struct virtio_gpu_fence **fence)
{
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->box = *box;
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
			   void *data, uint32_t data_size,
			   uint32_t ctx_id, struct virtio_gpu_fence **fence)
{
	struct virtio_gpu_cmd_submit *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->data_buf = data;
	vbuf->data_size = data_size;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->size = cpu_to_le32(data_size);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

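/*
 * Attach an object's pages as the backing store of a host resource.
 * The mem-entry array is built from the object's sg table; it is
 * allocated here and, since it is queued as vbuf->data_buf, kfree'd by
 * the reclaim path once the ring has consumed the command.
 */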
int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
			     struct virtio_gpu_object *obj,
			     uint32_t resource_id,
			     struct virtio_gpu_fence **fence)
{
	struct virtio_gpu_mem_entry *ents;
	struct scatterlist *sg;
	int si;

	if (!obj->pages) {
		int ret;

		ret = virtio_gpu_object_get_sg_table(vgdev, obj);
		if (ret)
			return ret;
	}

	/* gets freed when the ring has consumed it */
	ents = kmalloc_array(obj->pages->nents,
			     sizeof(struct virtio_gpu_mem_entry),
			     GFP_KERNEL);
	if (!ents) {
		DRM_ERROR("failed to allocate ent list\n");
		return -ENOMEM;
	}

	for_each_sg(obj->pages->sgl, sg, obj->pages->nents, si) {
		ents[si].addr = cpu_to_le64(sg_phys(sg));
		ents[si].length = cpu_to_le32(sg->length);
		ents[si].padding = 0;
	}

	virtio_gpu_cmd_resource_attach_backing(vgdev, resource_id,
					       ents, obj->pages->nents,
					       fence);
	obj->hw_res_handle = resource_id;
	return 0;
}

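/*
 * Push the current cursor state for one output.  Cursor updates are
 * fire-and-forget: no response, no fence, just a copy of the cached
 * virtio_gpu_update_cursor into a fresh vbuffer.
 */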
void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
			    struct virtio_gpu_output *output)
{
	struct virtio_gpu_vbuffer *vbuf;
	struct virtio_gpu_update_cursor *cur_p;

	output->cursor.pos.scanout_id = cpu_to_le32(output->index);
	cur_p = virtio_gpu_alloc_cursor(vgdev, &vbuf);
	memcpy(cur_p, &output->cursor, sizeof(output->cursor));
	virtio_gpu_queue_cursor(vgdev, vbuf);
}