/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie <airlied@redhat.com>
 *    Gerd Hoffmann <kraxel@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drmP.h>
#include "virtgpu_drv.h"
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>

#define MAX_INLINE_CMD_SIZE   96
#define MAX_INLINE_RESP_SIZE  24
#define VBUFFER_SIZE          (sizeof(struct virtio_gpu_vbuffer) \
			       + MAX_INLINE_CMD_SIZE \
			       + MAX_INLINE_RESP_SIZE)

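/*
 * Resource handles passed to the host are allocated from a device-wide
 * IDR (starting at 1) so they stay unique until explicitly released.
 */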
void virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
				uint32_t *resid)
{
	int handle;

	idr_preload(GFP_KERNEL);
	spin_lock(&vgdev->resource_idr_lock);
	handle = idr_alloc(&vgdev->resource_idr, NULL, 1, 0, GFP_NOWAIT);
	spin_unlock(&vgdev->resource_idr_lock);
	idr_preload_end();
	*resid = handle;
}

void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
{
	spin_lock(&vgdev->resource_idr_lock);
	idr_remove(&vgdev->resource_idr, id);
	spin_unlock(&vgdev->resource_idr_lock);
}

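/*
 * Virtqueue callbacks: completed buffers are not reclaimed here but in
 * the per-queue dequeue work, outside of interrupt context.
 */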
void virtio_gpu_ctrl_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	schedule_work(&vgdev->ctrlq.dequeue_work);
}

void virtio_gpu_cursor_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	schedule_work(&vgdev->cursorq.dequeue_work);
}

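/*
 * Pre-allocate one vbuffer (header plus inline command and response
 * space) for every slot of the control and cursor rings and keep them
 * on a free list.
 */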
int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_vbuffer *vbuf;
	int i, size, count = 0;
	void *ptr;

	INIT_LIST_HEAD(&vgdev->free_vbufs);
	count += virtqueue_get_vring_size(vgdev->ctrlq.vq);
	count += virtqueue_get_vring_size(vgdev->cursorq.vq);
	size = count * VBUFFER_SIZE;
	DRM_INFO("virtio vbuffers: %d bufs, %zdB each, %dkB total.\n",
		 count, VBUFFER_SIZE, size / 1024);

	vgdev->vbufs = kzalloc(size, GFP_KERNEL);
	if (!vgdev->vbufs)
		return -ENOMEM;

	for (i = 0, ptr = vgdev->vbufs;
	     i < count;
	     i++, ptr += VBUFFER_SIZE) {
		vbuf = ptr;
		list_add(&vbuf->list, &vgdev->free_vbufs);
	}
	return 0;
}

void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_vbuffer *vbuf;
	int i, count = 0;

	count += virtqueue_get_vring_size(vgdev->ctrlq.vq);
	count += virtqueue_get_vring_size(vgdev->cursorq.vq);

	for (i = 0; i < count; i++) {
		if (WARN_ON(list_empty(&vgdev->free_vbufs)))
			return;
		vbuf = list_first_entry(&vgdev->free_vbufs,
					struct virtio_gpu_vbuffer, list);
		list_del(&vbuf->list);
	}
	kfree(vgdev->vbufs);
}

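/*
 * Grab a vbuffer from the free list.  The command is always stored
 * inline; the response lives inline too if it fits, otherwise the
 * caller-provided resp_buf is used.
 */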
static struct virtio_gpu_vbuffer*
virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
		    int size, int resp_size, void *resp_buf,
		    virtio_gpu_resp_cb resp_cb)
{
	struct virtio_gpu_vbuffer *vbuf;

	BUG_ON(list_empty(&vgdev->free_vbufs));
	vbuf = list_first_entry(&vgdev->free_vbufs,
				struct virtio_gpu_vbuffer, list);
	list_del(&vbuf->list);
	memset(vbuf, 0, VBUFFER_SIZE);

	BUG_ON(size > MAX_INLINE_CMD_SIZE);
	vbuf->buf = (void *)vbuf + sizeof(*vbuf);
	vbuf->size = size;

	vbuf->resp_cb = resp_cb;
	vbuf->resp_size = resp_size;
	if (resp_size <= MAX_INLINE_RESP_SIZE)
		vbuf->resp_buf = (void *)vbuf->buf + size;
	else
		vbuf->resp_buf = resp_buf;
	BUG_ON(!vbuf->resp_buf);
	return vbuf;
}

static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_vbuffer **vbuffer_p,
				  int size)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf(vgdev, size,
				   sizeof(struct virtio_gpu_ctrl_hdr),
				   NULL, NULL);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return vbuf->buf;
}

static struct virtio_gpu_update_cursor*
virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
			struct virtio_gpu_vbuffer **vbuffer_p)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf
		(vgdev, sizeof(struct virtio_gpu_update_cursor),
		 0, NULL, NULL);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_update_cursor *)vbuf->buf;
}

static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
				       virtio_gpu_resp_cb cb,
				       struct virtio_gpu_vbuffer **vbuffer_p,
				       int cmd_size, int resp_size,
				       void *resp_buf)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
				   resp_size, resp_buf, cb);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_command *)vbuf->buf;
}

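/*
 * Put a vbuffer back on the free list, releasing any out-of-line
 * response or data buffer attached to it.
 */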
static void free_vbuf(struct virtio_gpu_device *vgdev,
		      struct virtio_gpu_vbuffer *vbuf)
{
	if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
		kfree(vbuf->resp_buf);
	kfree(vbuf->data_buf);
	list_add(&vbuf->list, &vgdev->free_vbufs);
}

static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
{
	struct virtio_gpu_vbuffer *vbuf;
	unsigned int len;
	int freed = 0;

	while ((vbuf = virtqueue_get_buf(vq, &len))) {
		list_add_tail(&vbuf->list, reclaim_list);
		freed++;
	}
	if (freed == 0)
		DRM_DEBUG("Huh? zero vbufs reclaimed");
}

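/*
 * Control-queue work handler: collect completed buffers, note the
 * highest fence id the host has signalled, run the per-buffer response
 * callbacks and finally wake waiters and process pending fences.
 */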
void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     ctrlq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;
	struct virtio_gpu_ctrl_hdr *resp;
	u64 fence_id = 0;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->ctrlq.qlock);
	do {
		virtqueue_disable_cb(vgdev->ctrlq.vq);
		reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);

	} while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
	spin_unlock(&vgdev->ctrlq.qlock);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
		if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA))
			DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
		if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
			u64 f = le64_to_cpu(resp->fence_id);

			if (fence_id > f) {
				DRM_ERROR("%s: Oops: fence %llx -> %llx\n",
					  __func__, fence_id, f);
			} else {
				fence_id = f;
			}
		}
		if (entry->resp_cb)
			entry->resp_cb(vgdev, entry);

		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
	wake_up(&vgdev->ctrlq.ack_queue);

	if (fence_id)
		virtio_gpu_fence_event_process(vgdev, fence_id);
}

void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     cursorq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->cursorq.qlock);
	do {
		virtqueue_disable_cb(vgdev->cursorq.vq);
		reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list);
	} while (!virtqueue_enable_cb(vgdev->cursorq.vq));
	spin_unlock(&vgdev->cursorq.qlock);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
	wake_up(&vgdev->cursorq.ack_queue);
}

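/*
 * Queue a command on the control ring: one out-sg for the command, an
 * optional out-sg for attached data and an optional in-sg for the
 * response.  On a full ring the call waits for the host to free slots
 * and retries; on success the number of free ring entries is returned.
 */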
static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
					struct virtio_gpu_vbuffer *vbuf)
{
	struct virtqueue *vq = vgdev->ctrlq.vq;
	struct scatterlist *sgs[3], vcmd, vout, vresp;
	int outcnt = 0, incnt = 0;
	int ret;

	if (!vgdev->vqs_ready)
		return -ENODEV;

	sg_init_one(&vcmd, vbuf->buf, vbuf->size);
	sgs[outcnt + incnt] = &vcmd;
	outcnt++;

	if (vbuf->data_size) {
		sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
		sgs[outcnt + incnt] = &vout;
		outcnt++;
	}

	if (vbuf->resp_size) {
		sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
		sgs[outcnt + incnt] = &vresp;
		incnt++;
	}

	spin_lock(&vgdev->ctrlq.qlock);
retry:
	ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
	if (ret == -ENOSPC) {
		spin_unlock(&vgdev->ctrlq.qlock);
		wait_event(vgdev->ctrlq.ack_queue, vq->num_free);
		spin_lock(&vgdev->ctrlq.qlock);
		goto retry;
	} else {
		virtqueue_kick(vq);
	}
	spin_unlock(&vgdev->ctrlq.qlock);

	if (!ret)
		ret = vq->num_free;
	return ret;
}

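/*
 * Same as above for the cursor ring, which only ever carries a single
 * out-sg per command and has no response buffer.
 */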
static int virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
				   struct virtio_gpu_vbuffer *vbuf)
{
	struct virtqueue *vq = vgdev->cursorq.vq;
	struct scatterlist *sgs[1], ccmd;
	int ret;
	int outcnt;

	if (!vgdev->vqs_ready)
		return -ENODEV;

	sg_init_one(&ccmd, vbuf->buf, vbuf->size);
	sgs[0] = &ccmd;
	outcnt = 1;

	spin_lock(&vgdev->cursorq.qlock);
retry:
	ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
	if (ret == -ENOSPC) {
		spin_unlock(&vgdev->cursorq.qlock);
		wait_event(vgdev->cursorq.ack_queue, vq->num_free);
		spin_lock(&vgdev->cursorq.qlock);
		goto retry;
	} else {
		virtqueue_kick(vq);
	}

	spin_unlock(&vgdev->cursorq.qlock);

	if (!ret)
		ret = vq->num_free;
	return ret;
}

/* just create gem objects for userspace and long lived objects,
   just use dma_alloced pages for the queue objects? */

/* create a basic resource */
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
				    uint32_t resource_id,
				    uint32_t format,
				    uint32_t width,
				    uint32_t height)
{
	struct virtio_gpu_resource_create_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->format = cpu_to_le32(format);
	cmd_p->width = cpu_to_le32(width);
	cmd_p->height = cpu_to_le32(height);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
				   uint32_t resource_id)
{
	struct virtio_gpu_resource_unref *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
	cmd_p->resource_id = cpu_to_le32(resource_id);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
					   uint32_t resource_id)
{
	struct virtio_gpu_resource_detach_backing *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
	cmd_p->resource_id = cpu_to_le32(resource_id);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
				uint32_t scanout_id, uint32_t resource_id,
				uint32_t width, uint32_t height,
				uint32_t x, uint32_t y)
{
	struct virtio_gpu_set_scanout *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->scanout_id = cpu_to_le32(scanout_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
				   uint32_t resource_id,
				   uint32_t x, uint32_t y,
				   uint32_t width, uint32_t height)
{
	struct virtio_gpu_resource_flush *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
					uint32_t resource_id, uint64_t offset,
					__le32 width, __le32 height,
					__le32 x, __le32 y,
					struct virtio_gpu_fence **fence)
{
	struct virtio_gpu_transfer_to_host_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->r.width = width;
	cmd_p->r.height = height;
	cmd_p->r.x = x;
	cmd_p->r.y = y;

	if (fence)
		virtio_gpu_fence_emit(vgdev, &cmd_p->hdr, fence);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

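/*
 * Attach backing pages to a resource.  The ents array is handed over to
 * the vbuffer and freed once the host has consumed the command.
 */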
static void
virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
				       uint32_t resource_id,
				       struct virtio_gpu_mem_entry *ents,
				       uint32_t nents,
				       struct virtio_gpu_fence **fence)
{
	struct virtio_gpu_resource_attach_backing *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->nr_entries = cpu_to_le32(nents);

	vbuf->data_buf = ents;
	vbuf->data_size = sizeof(*ents) * nents;

	if (fence)
		virtio_gpu_fence_emit(vgdev, &cmd_p->hdr, fence);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

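/*
 * Response callback for GET_DISPLAY_INFO: copy the per-scanout modes
 * into vgdev->outputs, wake anybody waiting for the reply and signal a
 * hotplug event so the new modes get picked up.
 */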
static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_resp_display_info *resp =
		(struct virtio_gpu_resp_display_info *)vbuf->resp_buf;
	int i;

	spin_lock(&vgdev->display_info_lock);
	for (i = 0; i < vgdev->num_scanouts; i++) {
		vgdev->outputs[i].info = resp->pmodes[i];
		if (resp->pmodes[i].enabled) {
			DRM_DEBUG("output %d: %dx%d+%d+%d", i,
				  le32_to_cpu(resp->pmodes[i].r.width),
				  le32_to_cpu(resp->pmodes[i].r.height),
				  le32_to_cpu(resp->pmodes[i].r.x),
				  le32_to_cpu(resp->pmodes[i].r.y));
		} else {
			DRM_DEBUG("output %d: disabled", i);
		}
	}

	vgdev->display_info_pending = false;
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);

	if (!drm_helper_hpd_irq_event(vgdev->ddev))
		drm_kms_helper_hotplug_event(vgdev->ddev);
}

int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_ctrl_hdr *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_display_info),
			   GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_get_display_info_cb, &vbuf,
		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_display_info),
		 resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	vgdev->display_info_pending = true;
	cmd_p->type = cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

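/*
 * Build a virtio_gpu_mem_entry array from the object's sg table and
 * attach it as backing store for the given resource id.
 */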
int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
			     struct virtio_gpu_object *obj,
			     uint32_t resource_id,
			     struct virtio_gpu_fence **fence)
{
	struct virtio_gpu_mem_entry *ents;
	struct scatterlist *sg;
	int si;

	if (!obj->pages) {
		int ret;

		ret = virtio_gpu_object_get_sg_table(vgdev, obj);
		if (ret)
			return ret;
	}

	/* gets freed when the ring has consumed it */
	ents = kmalloc_array(obj->pages->nents,
			     sizeof(struct virtio_gpu_mem_entry),
			     GFP_KERNEL);
	if (!ents) {
		DRM_ERROR("failed to allocate ent list\n");
		return -ENOMEM;
	}

	for_each_sg(obj->pages->sgl, sg, obj->pages->nents, si) {
		ents[si].addr = cpu_to_le64(sg_phys(sg));
		ents[si].length = cpu_to_le32(sg->length);
		ents[si].padding = 0;
	}

	virtio_gpu_cmd_resource_attach_backing(vgdev, resource_id,
					       ents, obj->pages->nents,
					       fence);
	obj->hw_res_handle = resource_id;
	return 0;
}

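/*
 * Push the current cursor state of an output to the host via the
 * cursor queue.
 */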
void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
			    struct virtio_gpu_output *output)
{
	struct virtio_gpu_vbuffer *vbuf;
	struct virtio_gpu_update_cursor *cur_p;

	output->cursor.pos.scanout_id = cpu_to_le32(output->index);
	cur_p = virtio_gpu_alloc_cursor(vgdev, &vbuf);
	memcpy(cur_p, &output->cursor, sizeof(output->cursor));
	virtio_gpu_queue_cursor(vgdev, vbuf);
}