/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie <airlied@redhat.com>
 *    Gerd Hoffmann <kraxel@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drmP.h>
#include "virtgpu_drv.h"
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>

#define MAX_INLINE_CMD_SIZE   96
#define MAX_INLINE_RESP_SIZE  24
#define VBUFFER_SIZE          (sizeof(struct virtio_gpu_vbuffer) \
                               + MAX_INLINE_CMD_SIZE             \
                               + MAX_INLINE_RESP_SIZE)

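/*
 * Resource IDs are allocated from a device-wide IDR so that each host-side
 * resource gets a unique, non-zero handle.
 */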
void virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
                                uint32_t *resid)
{
        int handle;

        idr_preload(GFP_KERNEL);
        spin_lock(&vgdev->resource_idr_lock);
        handle = idr_alloc(&vgdev->resource_idr, NULL, 1, 0, GFP_NOWAIT);
        spin_unlock(&vgdev->resource_idr_lock);
        idr_preload_end();
        *resid = handle;
}

void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
{
        spin_lock(&vgdev->resource_idr_lock);
        idr_remove(&vgdev->resource_idr, id);
        spin_unlock(&vgdev->resource_idr_lock);
}

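/*
 * Virtqueue callbacks, invoked by the virtio core when the host has used
 * buffers on the control/cursor queue.  The actual processing happens in
 * the dequeue work functions below, keeping the interrupt path short.
 */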
void virtio_gpu_ctrl_ack(struct virtqueue *vq)
{
        struct drm_device *dev = vq->vdev->priv;
        struct virtio_gpu_device *vgdev = dev->dev_private;
        schedule_work(&vgdev->ctrlq.dequeue_work);
}

void virtio_gpu_cursor_ack(struct virtqueue *vq)
{
        struct drm_device *dev = vq->vdev->priv;
        struct virtio_gpu_device *vgdev = dev->dev_private;
        schedule_work(&vgdev->cursorq.dequeue_work);
}

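/*
 * Command buffers (vbufs) come from one preallocated pool sized to cover
 * both vrings plus a little slack; each entry carries inline space for a
 * small command and a small response (see VBUFFER_SIZE above).
 */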
int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
{
        struct virtio_gpu_vbuffer *vbuf;
        int i, size, count = 16;
        void *ptr;

        INIT_LIST_HEAD(&vgdev->free_vbufs);
        spin_lock_init(&vgdev->free_vbufs_lock);
        count += virtqueue_get_vring_size(vgdev->ctrlq.vq);
        count += virtqueue_get_vring_size(vgdev->cursorq.vq);
        size = count * VBUFFER_SIZE;
        DRM_INFO("virtio vbuffers: %d bufs, %zdB each, %dkB total.\n",
                 count, VBUFFER_SIZE, size / 1024);

        vgdev->vbufs = kzalloc(size, GFP_KERNEL);
        if (!vgdev->vbufs)
                return -ENOMEM;

        for (i = 0, ptr = vgdev->vbufs;
             i < count;
             i++, ptr += VBUFFER_SIZE) {
                vbuf = ptr;
                list_add(&vbuf->list, &vgdev->free_vbufs);
        }
        return 0;
}

void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
{
        struct virtio_gpu_vbuffer *vbuf;
        int i, count = 0;

        count += virtqueue_get_vring_size(vgdev->ctrlq.vq);
        count += virtqueue_get_vring_size(vgdev->cursorq.vq);

        spin_lock(&vgdev->free_vbufs_lock);
        for (i = 0; i < count; i++) {
                if (WARN_ON(list_empty(&vgdev->free_vbufs))) {
                        spin_unlock(&vgdev->free_vbufs_lock);
                        return;
                }
                vbuf = list_first_entry(&vgdev->free_vbufs,
                                        struct virtio_gpu_vbuffer, list);
                list_del(&vbuf->list);
        }
        spin_unlock(&vgdev->free_vbufs_lock);
        kfree(vgdev->vbufs);
}

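/*
 * Grab a vbuf from the free pool and carve the command (and, if it fits,
 * the response) out of its inline storage.  Larger responses use the
 * caller-supplied resp_buf instead.
 */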
static struct virtio_gpu_vbuffer*
virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
                    int size, int resp_size, void *resp_buf,
                    virtio_gpu_resp_cb resp_cb)
{
        struct virtio_gpu_vbuffer *vbuf;

        spin_lock(&vgdev->free_vbufs_lock);
        BUG_ON(list_empty(&vgdev->free_vbufs));
        vbuf = list_first_entry(&vgdev->free_vbufs,
                                struct virtio_gpu_vbuffer, list);
        list_del(&vbuf->list);
        spin_unlock(&vgdev->free_vbufs_lock);
        memset(vbuf, 0, VBUFFER_SIZE);

        BUG_ON(size > MAX_INLINE_CMD_SIZE);
        vbuf->buf = (void *)vbuf + sizeof(*vbuf);
        vbuf->size = size;

        vbuf->resp_cb = resp_cb;
        vbuf->resp_size = resp_size;
        if (resp_size <= MAX_INLINE_RESP_SIZE)
                vbuf->resp_buf = (void *)vbuf->buf + size;
        else
                vbuf->resp_buf = resp_buf;
        BUG_ON(!vbuf->resp_buf);
        return vbuf;
}

static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
                                  struct virtio_gpu_vbuffer **vbuffer_p,
                                  int size)
{
        struct virtio_gpu_vbuffer *vbuf;

        vbuf = virtio_gpu_get_vbuf(vgdev, size,
                                   sizeof(struct virtio_gpu_ctrl_hdr),
                                   NULL, NULL);
        if (IS_ERR(vbuf)) {
                *vbuffer_p = NULL;
                return ERR_CAST(vbuf);
        }
        *vbuffer_p = vbuf;
        return vbuf->buf;
}

static struct virtio_gpu_update_cursor*
virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
                        struct virtio_gpu_vbuffer **vbuffer_p)
{
        struct virtio_gpu_vbuffer *vbuf;

        vbuf = virtio_gpu_get_vbuf
                (vgdev, sizeof(struct virtio_gpu_update_cursor),
                 0, NULL, NULL);
        if (IS_ERR(vbuf)) {
                *vbuffer_p = NULL;
                return ERR_CAST(vbuf);
        }
        *vbuffer_p = vbuf;
        return (struct virtio_gpu_update_cursor *)vbuf->buf;
}

static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
                                       virtio_gpu_resp_cb cb,
                                       struct virtio_gpu_vbuffer **vbuffer_p,
                                       int cmd_size, int resp_size,
                                       void *resp_buf)
{
        struct virtio_gpu_vbuffer *vbuf;

        vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
                                   resp_size, resp_buf, cb);
        if (IS_ERR(vbuf)) {
                *vbuffer_p = NULL;
                return ERR_CAST(vbuf);
        }
        *vbuffer_p = vbuf;
        return (struct virtio_gpu_command *)vbuf->buf;
}

static void free_vbuf(struct virtio_gpu_device *vgdev,
                      struct virtio_gpu_vbuffer *vbuf)
{
        if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
                kfree(vbuf->resp_buf);
        kfree(vbuf->data_buf);
        spin_lock(&vgdev->free_vbufs_lock);
        list_add(&vbuf->list, &vgdev->free_vbufs);
        spin_unlock(&vgdev->free_vbufs_lock);
}

static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
{
        struct virtio_gpu_vbuffer *vbuf;
        unsigned int len;
        int freed = 0;

        while ((vbuf = virtqueue_get_buf(vq, &len))) {
                list_add_tail(&vbuf->list, reclaim_list);
                freed++;
        }
        if (freed == 0)
                DRM_DEBUG("Huh? zero vbufs reclaimed");
}

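/*
 * Work function for the control queue: collect all completed vbufs while
 * re-arming the vq callback, report unexpected responses, run per-buffer
 * callbacks, recycle the vbufs and finally signal the highest fence seen.
 */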
void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
{
        struct virtio_gpu_device *vgdev =
                container_of(work, struct virtio_gpu_device,
                             ctrlq.dequeue_work);
        struct list_head reclaim_list;
        struct virtio_gpu_vbuffer *entry, *tmp;
        struct virtio_gpu_ctrl_hdr *resp;
        u64 fence_id = 0;

        INIT_LIST_HEAD(&reclaim_list);
        spin_lock(&vgdev->ctrlq.qlock);
        do {
                virtqueue_disable_cb(vgdev->ctrlq.vq);
                reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);

        } while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
        spin_unlock(&vgdev->ctrlq.qlock);

        list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
                resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
                if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA))
                        DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
                if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
                        u64 f = le64_to_cpu(resp->fence_id);

                        if (fence_id > f) {
                                DRM_ERROR("%s: Oops: fence %llx -> %llx\n",
                                          __func__, fence_id, f);
                        } else {
                                fence_id = f;
                        }
                }
                if (entry->resp_cb)
                        entry->resp_cb(vgdev, entry);

                list_del(&entry->list);
                free_vbuf(vgdev, entry);
        }
        wake_up(&vgdev->ctrlq.ack_queue);

        if (fence_id)
                virtio_gpu_fence_event_process(vgdev, fence_id);
}

void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
{
        struct virtio_gpu_device *vgdev =
                container_of(work, struct virtio_gpu_device,
                             cursorq.dequeue_work);
        struct list_head reclaim_list;
        struct virtio_gpu_vbuffer *entry, *tmp;

        INIT_LIST_HEAD(&reclaim_list);
        spin_lock(&vgdev->cursorq.qlock);
        do {
                virtqueue_disable_cb(vgdev->cursorq.vq);
                reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list);
        } while (!virtqueue_enable_cb(vgdev->cursorq.vq));
        spin_unlock(&vgdev->cursorq.qlock);

        list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
                list_del(&entry->list);
                free_vbuf(vgdev, entry);
        }
        wake_up(&vgdev->cursorq.ack_queue);
}

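/*
 * Add a vbuf to the control vring: the command always goes out, optional
 * data pages follow it, and an optional response slot is added as the
 * device-writable entry.  Called with ctrlq.qlock held; the lock is
 * dropped while waiting for ring space.
 */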
static int virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
                                               struct virtio_gpu_vbuffer *vbuf)
{
        struct virtqueue *vq = vgdev->ctrlq.vq;
        struct scatterlist *sgs[3], vcmd, vout, vresp;
        int outcnt = 0, incnt = 0;
        int ret;

        if (!vgdev->vqs_ready)
                return -ENODEV;

        sg_init_one(&vcmd, vbuf->buf, vbuf->size);
        sgs[outcnt + incnt] = &vcmd;
        outcnt++;

        if (vbuf->data_size) {
                sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
                sgs[outcnt + incnt] = &vout;
                outcnt++;
        }

        if (vbuf->resp_size) {
                sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
                sgs[outcnt + incnt] = &vresp;
                incnt++;
        }

retry:
        ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
        if (ret == -ENOSPC) {
                spin_unlock(&vgdev->ctrlq.qlock);
                wait_event(vgdev->ctrlq.ack_queue, vq->num_free);
                spin_lock(&vgdev->ctrlq.qlock);
                goto retry;
        } else {
                virtqueue_kick(vq);
        }

        if (!ret)
                ret = vq->num_free;
        return ret;
}

static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
                                        struct virtio_gpu_vbuffer *vbuf)
{
        int rc;

        spin_lock(&vgdev->ctrlq.qlock);
        rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
        spin_unlock(&vgdev->ctrlq.qlock);
        return rc;
}

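/*
 * Fenced commands must reach the ring in fence order, so enough
 * descriptors are reserved up front (a command plus data plus response
 * never needs more than three) before the fence id is emitted under the
 * queue lock.
 */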
static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
                                               struct virtio_gpu_vbuffer *vbuf,
                                               struct virtio_gpu_ctrl_hdr *hdr,
                                               struct virtio_gpu_fence **fence)
{
        struct virtqueue *vq = vgdev->ctrlq.vq;
        int rc;

again:
        spin_lock(&vgdev->ctrlq.qlock);

        /*
         * Make sure we have enough space in the virtqueue.  If not,
         * wait here until we have.
         *
         * Without that virtio_gpu_queue_ctrl_buffer_locked might have
         * to wait for free space, which can result in fence ids being
         * submitted out-of-order.
         */
        if (vq->num_free < 3) {
                spin_unlock(&vgdev->ctrlq.qlock);
                wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= 3);
                goto again;
        }

        if (fence)
                virtio_gpu_fence_emit(vgdev, hdr, fence);
        rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
        spin_unlock(&vgdev->ctrlq.qlock);
        return rc;
}

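/*
 * Cursor updates use the dedicated cursor virtqueue; the command goes out
 * as a single entry and no response is expected.
 */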
static int virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
                                   struct virtio_gpu_vbuffer *vbuf)
{
        struct virtqueue *vq = vgdev->cursorq.vq;
        struct scatterlist *sgs[1], ccmd;
        int ret;
        int outcnt;

        if (!vgdev->vqs_ready)
                return -ENODEV;

        sg_init_one(&ccmd, vbuf->buf, vbuf->size);
        sgs[0] = &ccmd;
        outcnt = 1;

        spin_lock(&vgdev->cursorq.qlock);
retry:
        ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
        if (ret == -ENOSPC) {
                spin_unlock(&vgdev->cursorq.qlock);
                wait_event(vgdev->cursorq.ack_queue, vq->num_free);
                spin_lock(&vgdev->cursorq.qlock);
                goto retry;
        } else {
                virtqueue_kick(vq);
        }

        spin_unlock(&vgdev->cursorq.qlock);

        if (!ret)
                ret = vq->num_free;
        return ret;
}

/* just create gem objects for userspace and long lived objects,
   just use dma_alloced pages for the queue objects? */

/* create a basic resource */
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
                                    uint32_t resource_id,
                                    uint32_t format,
                                    uint32_t width,
                                    uint32_t height)
{
        struct virtio_gpu_resource_create_2d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->format = cpu_to_le32(format);
        cmd_p->width = cpu_to_le32(width);
        cmd_p->height = cpu_to_le32(height);

        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
                                   uint32_t resource_id)
{
        struct virtio_gpu_resource_unref *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
        cmd_p->resource_id = cpu_to_le32(resource_id);

        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
                                           uint32_t resource_id)
{
        struct virtio_gpu_resource_detach_backing *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
        cmd_p->resource_id = cpu_to_le32(resource_id);

        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
                                uint32_t scanout_id, uint32_t resource_id,
                                uint32_t width, uint32_t height,
                                uint32_t x, uint32_t y)
{
        struct virtio_gpu_set_scanout *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->scanout_id = cpu_to_le32(scanout_id);
        cmd_p->r.width = cpu_to_le32(width);
        cmd_p->r.height = cpu_to_le32(height);
        cmd_p->r.x = cpu_to_le32(x);
        cmd_p->r.y = cpu_to_le32(y);

        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
                                   uint32_t resource_id,
                                   uint32_t x, uint32_t y,
                                   uint32_t width, uint32_t height)
{
        struct virtio_gpu_resource_flush *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->r.width = cpu_to_le32(width);
        cmd_p->r.height = cpu_to_le32(height);
        cmd_p->r.x = cpu_to_le32(x);
        cmd_p->r.y = cpu_to_le32(y);

        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

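/*
 * Transfer guest-memory contents into the host-side resource.  Width,
 * height, x and y are already little-endian here; the caller passes them
 * straight from a __le32 rectangle.
 */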
void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
                                        uint32_t resource_id, uint64_t offset,
                                        __le32 width, __le32 height,
                                        __le32 x, __le32 y,
                                        struct virtio_gpu_fence **fence)
{
        struct virtio_gpu_transfer_to_host_2d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->offset = cpu_to_le64(offset);
        cmd_p->r.width = width;
        cmd_p->r.height = height;
        cmd_p->r.x = x;
        cmd_p->r.y = y;

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

static void
virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
                                       uint32_t resource_id,
                                       struct virtio_gpu_mem_entry *ents,
                                       uint32_t nents,
                                       struct virtio_gpu_fence **fence)
{
        struct virtio_gpu_resource_attach_backing *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->nr_entries = cpu_to_le32(nents);

        vbuf->data_buf = ents;
        vbuf->data_size = sizeof(*ents) * nents;

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

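/*
 * Response callbacks, run from the control-queue dequeue work.  They copy
 * the device's answer into driver state under display_info_lock and wake
 * anyone sleeping on resp_wq.
 */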
static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
                                               struct virtio_gpu_vbuffer *vbuf)
{
        struct virtio_gpu_resp_display_info *resp =
                (struct virtio_gpu_resp_display_info *)vbuf->resp_buf;
        int i;

        spin_lock(&vgdev->display_info_lock);
        for (i = 0; i < vgdev->num_scanouts; i++) {
                vgdev->outputs[i].info = resp->pmodes[i];
                if (resp->pmodes[i].enabled) {
                        DRM_DEBUG("output %d: %dx%d+%d+%d", i,
                                  le32_to_cpu(resp->pmodes[i].r.width),
                                  le32_to_cpu(resp->pmodes[i].r.height),
                                  le32_to_cpu(resp->pmodes[i].r.x),
                                  le32_to_cpu(resp->pmodes[i].r.y));
                } else {
                        DRM_DEBUG("output %d: disabled", i);
                }
        }

        vgdev->display_info_pending = false;
        spin_unlock(&vgdev->display_info_lock);
        wake_up(&vgdev->resp_wq);

        if (!drm_helper_hpd_irq_event(vgdev->ddev))
                drm_kms_helper_hotplug_event(vgdev->ddev);
}

static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
                                              struct virtio_gpu_vbuffer *vbuf)
{
        struct virtio_gpu_get_capset_info *cmd =
                (struct virtio_gpu_get_capset_info *)vbuf->buf;
        struct virtio_gpu_resp_capset_info *resp =
                (struct virtio_gpu_resp_capset_info *)vbuf->resp_buf;
        int i = le32_to_cpu(cmd->capset_index);

        spin_lock(&vgdev->display_info_lock);
        vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
        vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
        vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
        spin_unlock(&vgdev->display_info_lock);
        wake_up(&vgdev->resp_wq);
}

static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
                                     struct virtio_gpu_vbuffer *vbuf)
{
        struct virtio_gpu_get_capset *cmd =
                (struct virtio_gpu_get_capset *)vbuf->buf;
        struct virtio_gpu_resp_capset *resp =
                (struct virtio_gpu_resp_capset *)vbuf->resp_buf;
        struct virtio_gpu_drv_cap_cache *cache_ent;

        spin_lock(&vgdev->display_info_lock);
        list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
                if (cache_ent->version == le32_to_cpu(cmd->capset_version) &&
                    cache_ent->id == le32_to_cpu(cmd->capset_id)) {
                        memcpy(cache_ent->caps_cache, resp->capset_data,
                               cache_ent->size);
                        atomic_set(&cache_ent->is_valid, 1);
                        break;
                }
        }
        spin_unlock(&vgdev->display_info_lock);
        wake_up(&vgdev->resp_wq);
}

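/*
 * Ask the host for the current scanout configuration; the answer is
 * handled asynchronously in virtio_gpu_cmd_get_display_info_cb.
 */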
int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
{
        struct virtio_gpu_ctrl_hdr *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        void *resp_buf;

        resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_display_info),
                           GFP_KERNEL);
        if (!resp_buf)
                return -ENOMEM;

        cmd_p = virtio_gpu_alloc_cmd_resp
                (vgdev, &virtio_gpu_cmd_get_display_info_cb, &vbuf,
                 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_display_info),
                 resp_buf);
        memset(cmd_p, 0, sizeof(*cmd_p));

        vgdev->display_info_pending = true;
        cmd_p->type = cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
        return 0;
}

int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx)
{
        struct virtio_gpu_get_capset_info *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        void *resp_buf;

        resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset_info),
                           GFP_KERNEL);
        if (!resp_buf)
                return -ENOMEM;

        cmd_p = virtio_gpu_alloc_cmd_resp
                (vgdev, &virtio_gpu_cmd_get_capset_info_cb, &vbuf,
                 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_capset_info),
                 resp_buf);
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET_INFO);
        cmd_p->capset_index = cpu_to_le32(idx);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
        return 0;
}

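/*
 * Fetch one capability set from the host.  A cache entry sized for the
 * largest possible answer is queued on cap_cache first and marked valid
 * from virtio_gpu_cmd_capset_cb once the response arrives.
 */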
int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
                              int idx, int version,
                              struct virtio_gpu_drv_cap_cache **cache_p)
{
        struct virtio_gpu_get_capset *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        int max_size = vgdev->capsets[idx].max_size;
        struct virtio_gpu_drv_cap_cache *cache_ent;
        void *resp_buf;

        if (idx > vgdev->num_capsets)
                return -EINVAL;

        if (version > vgdev->capsets[idx].max_version)
                return -EINVAL;

        cache_ent = kzalloc(sizeof(*cache_ent), GFP_KERNEL);
        if (!cache_ent)
                return -ENOMEM;

        cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
        if (!cache_ent->caps_cache) {
                kfree(cache_ent);
                return -ENOMEM;
        }

        resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset) + max_size,
                           GFP_KERNEL);
        if (!resp_buf) {
                kfree(cache_ent->caps_cache);
                kfree(cache_ent);
                return -ENOMEM;
        }

        cache_ent->version = version;
        cache_ent->id = vgdev->capsets[idx].id;
        atomic_set(&cache_ent->is_valid, 0);
        cache_ent->size = max_size;
        spin_lock(&vgdev->display_info_lock);
        list_add_tail(&cache_ent->head, &vgdev->cap_cache);
        spin_unlock(&vgdev->display_info_lock);

        cmd_p = virtio_gpu_alloc_cmd_resp
                (vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p),
                 sizeof(struct virtio_gpu_resp_capset) + max_size,
                 resp_buf);
        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET);
        cmd_p->capset_id = cpu_to_le32(vgdev->capsets[idx].id);
        cmd_p->capset_version = cpu_to_le32(version);
        *cache_p = cache_ent;
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);

        return 0;
}

void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
                                   uint32_t nlen, const char *name)
{
        struct virtio_gpu_ctx_create *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
        cmd_p->hdr.ctx_id = cpu_to_le32(id);
        cmd_p->nlen = cpu_to_le32(nlen);
        strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name) - 1);
        cmd_p->debug_name[sizeof(cmd_p->debug_name) - 1] = 0;
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
                                    uint32_t id)
{
        struct virtio_gpu_ctx_destroy *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DESTROY);
        cmd_p->hdr.ctx_id = cpu_to_le32(id);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
                                            uint32_t ctx_id,
                                            uint32_t resource_id)
{
        struct virtio_gpu_ctx_resource *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
                                            uint32_t ctx_id,
                                            uint32_t resource_id)
{
        struct virtio_gpu_ctx_resource *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
                                  struct virtio_gpu_resource_create_3d *rc_3d,
                                  struct virtio_gpu_fence **fence)
{
        struct virtio_gpu_resource_create_3d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        *cmd_p = *rc_3d;
        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
        cmd_p->hdr.flags = 0;

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
                                        uint32_t resource_id, uint32_t ctx_id,
                                        uint64_t offset, uint32_t level,
                                        struct virtio_gpu_box *box,
                                        struct virtio_gpu_fence **fence)
{
        struct virtio_gpu_transfer_host_3d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->box = *box;
        cmd_p->offset = cpu_to_le64(offset);
        cmd_p->level = cpu_to_le32(level);

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
                                          uint32_t resource_id, uint32_t ctx_id,
                                          uint64_t offset, uint32_t level,
                                          struct virtio_gpu_box *box,
                                          struct virtio_gpu_fence **fence)
{
        struct virtio_gpu_transfer_host_3d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->box = *box;
        cmd_p->offset = cpu_to_le64(offset);
        cmd_p->level = cpu_to_le32(level);

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

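/*
 * Submit a 3D command stream from userspace.  The buffer is handed to the
 * vbuf as data payload and freed automatically once the host has consumed
 * it (see free_vbuf).
 */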
void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
                           void *data, uint32_t data_size,
                           uint32_t ctx_id, struct virtio_gpu_fence **fence)
{
        struct virtio_gpu_cmd_submit *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        vbuf->data_buf = data;
        vbuf->data_size = data_size;

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->size = cpu_to_le32(data_size);

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

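/*
 * Attach an object's backing pages to a host resource: build a mem-entry
 * array from the object's sg table and send it with ATTACH_BACKING.  The
 * entry array is freed when the ring has consumed the command.
 */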
int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
                             struct virtio_gpu_object *obj,
                             uint32_t resource_id,
                             struct virtio_gpu_fence **fence)
{
        struct virtio_gpu_mem_entry *ents;
        struct scatterlist *sg;
        int si;

        if (!obj->pages) {
                int ret;

                ret = virtio_gpu_object_get_sg_table(vgdev, obj);
                if (ret)
                        return ret;
        }

        /* gets freed when the ring has consumed it */
        ents = kmalloc_array(obj->pages->nents,
                             sizeof(struct virtio_gpu_mem_entry),
                             GFP_KERNEL);
        if (!ents) {
                DRM_ERROR("failed to allocate ent list\n");
                return -ENOMEM;
        }

        for_each_sg(obj->pages->sgl, sg, obj->pages->nents, si) {
                ents[si].addr = cpu_to_le64(sg_phys(sg));
                ents[si].length = cpu_to_le32(sg->length);
                ents[si].padding = 0;
        }

        virtio_gpu_cmd_resource_attach_backing(vgdev, resource_id,
                                               ents, obj->pages->nents,
                                               fence);
        obj->hw_res_handle = resource_id;
        return 0;
}

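/* Push the output's current cursor state to the host via the cursor queue. */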
void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
                            struct virtio_gpu_output *output)
{
        struct virtio_gpu_vbuffer *vbuf;
        struct virtio_gpu_update_cursor *cur_p;

        output->cursor.pos.scanout_id = cpu_to_le32(output->index);
        cur_p = virtio_gpu_alloc_cursor(vgdev, &vbuf);
        memcpy(cur_p, &output->cursor, sizeof(output->cursor));
        virtio_gpu_queue_cursor(vgdev, vbuf);
}