/**************************************************************************
 *
 * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include <ttm/ttm_placement.h>
#include "svga3d_surfacedefs.h"

/**
 * struct vmw_user_surface - User-space visible surface resource
 *
 * @prime: The TTM prime object handling user-space visibility and sharing.
 * @srf: The surface metadata.
 * @size: TTM accounting size for the surface.
 */
struct vmw_user_surface {
	struct ttm_prime_object prime;
	struct vmw_surface srf;
	uint32_t size;
};

/**
 * struct vmw_surface_offset - Backing store mip level offset info
 *
 * @face: Surface face.
 * @mip: Mip level.
 * @bo_offset: Offset into backing store of this mip level.
 */
struct vmw_surface_offset {
	uint32_t face;
	uint32_t mip;
	uint32_t bo_offset;
};

static void vmw_user_surface_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_surface_base_to_res(struct ttm_base_object *base);
static int vmw_legacy_srf_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf);
static int vmw_legacy_srf_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf);
static int vmw_legacy_srf_create(struct vmw_resource *res);
static int vmw_legacy_srf_destroy(struct vmw_resource *res);
static int vmw_gb_surface_create(struct vmw_resource *res);
static int vmw_gb_surface_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf);
static int vmw_gb_surface_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf);
static int vmw_gb_surface_destroy(struct vmw_resource *res);


static const struct vmw_user_resource_conv user_surface_conv = {
	.object_type = VMW_RES_SURFACE,
	.base_obj_to_res = vmw_user_surface_base_to_res,
	.res_free = vmw_user_surface_free
};

const struct vmw_user_resource_conv *user_surface_converter =
	&user_surface_conv;


static uint64_t vmw_user_surface_size;

static const struct vmw_res_func vmw_legacy_surface_func = {
	.res_type = vmw_res_surface,
	.needs_backup = false,
	.may_evict = true,
	.type_name = "legacy surfaces",
	.backup_placement = &vmw_srf_placement,
	.create = &vmw_legacy_srf_create,
	.destroy = &vmw_legacy_srf_destroy,
	.bind = &vmw_legacy_srf_bind,
	.unbind = &vmw_legacy_srf_unbind
};

static const struct vmw_res_func vmw_gb_surface_func = {
	.res_type = vmw_res_surface,
	.needs_backup = true,
	.may_evict = true,
	.type_name = "guest backed surfaces",
	.backup_placement = &vmw_mob_placement,
	.create = vmw_gb_surface_create,
	.destroy = vmw_gb_surface_destroy,
	.bind = vmw_gb_surface_bind,
	.unbind = vmw_gb_surface_unbind
};

/**
 * struct vmw_surface_dma - SVGA3D DMA command
 */
struct vmw_surface_dma {
	SVGA3dCmdHeader header;
	SVGA3dCmdSurfaceDMA body;
	SVGA3dCopyBox cb;
	SVGA3dCmdSurfaceDMASuffix suffix;
};

/**
 * struct vmw_surface_define - SVGA3D Surface Define command
 */
struct vmw_surface_define {
	SVGA3dCmdHeader header;
	SVGA3dCmdDefineSurface body;
};

/**
 * struct vmw_surface_destroy - SVGA3D Surface Destroy command
 */
struct vmw_surface_destroy {
	SVGA3dCmdHeader header;
	SVGA3dCmdDestroySurface body;
};

/**
 * vmw_surface_dma_size - Compute fifo size for a dma command.
 *
 * @srf: Pointer to a struct vmw_surface
 *
 * Computes the required size for a surface dma command for backup or
 * restoration of the surface represented by @srf.
 */
static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf)
{
	return srf->num_sizes * sizeof(struct vmw_surface_dma);
}


/**
 * vmw_surface_define_size - Compute fifo size for a surface define command.
 *
 * @srf: Pointer to a struct vmw_surface
 *
 * Computes the required size for a surface define command for the definition
 * of the surface represented by @srf.
 */
static inline uint32_t vmw_surface_define_size(const struct vmw_surface *srf)
{
	return sizeof(struct vmw_surface_define) + srf->num_sizes *
		sizeof(SVGA3dSize);
}


/**
 * vmw_surface_destroy_size - Compute fifo size for a surface destroy command.
 *
 * Computes the required size for a surface destroy command for the destruction
 * of a hw surface.
 */
static inline uint32_t vmw_surface_destroy_size(void)
{
	return sizeof(struct vmw_surface_destroy);
}

/**
 * vmw_surface_destroy_encode - Encode a surface_destroy command.
 *
 * @id: The surface id
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
 */
static void vmw_surface_destroy_encode(uint32_t id,
				       void *cmd_space)
{
	struct vmw_surface_destroy *cmd = (struct vmw_surface_destroy *)
		cmd_space;

	cmd->header.id = SVGA_3D_CMD_SURFACE_DESTROY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.sid = id;
}

/**
 * vmw_surface_define_encode - Encode a surface_define command.
 *
 * @srf: Pointer to a struct vmw_surface object.
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
 */
static void vmw_surface_define_encode(const struct vmw_surface *srf,
				      void *cmd_space)
{
	struct vmw_surface_define *cmd = (struct vmw_surface_define *)
		cmd_space;
	struct drm_vmw_size *src_size;
	SVGA3dSize *cmd_size;
	uint32_t cmd_len;
	int i;

	cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);

	cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE;
	cmd->header.size = cmd_len;
	cmd->body.sid = srf->res.id;
	cmd->body.surfaceFlags = srf->flags;
	cmd->body.format = cpu_to_le32(srf->format);
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
		cmd->body.face[i].numMipLevels = srf->mip_levels[i];

	cmd += 1;
	cmd_size = (SVGA3dSize *) cmd;
	src_size = srf->sizes;

	for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
		cmd_size->width = src_size->width;
		cmd_size->height = src_size->height;
		cmd_size->depth = src_size->depth;
	}
}

/**
 * vmw_surface_dma_encode - Encode a surface_dma command.
 *
 * @srf: Pointer to a struct vmw_surface object.
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
 * @ptr: Pointer to an SVGAGuestPtr indicating where the surface contents
 * should be placed or read from.
 * @to_surface: Boolean whether to DMA to the surface or from the surface.
 */
static void vmw_surface_dma_encode(struct vmw_surface *srf,
				   void *cmd_space,
				   const SVGAGuestPtr *ptr,
				   bool to_surface)
{
	uint32_t i;
	struct vmw_surface_dma *cmd = (struct vmw_surface_dma *)cmd_space;
	const struct svga3d_surface_desc *desc =
		svga3dsurface_get_desc(srf->format);

	for (i = 0; i < srf->num_sizes; ++i) {
		SVGA3dCmdHeader *header = &cmd->header;
		SVGA3dCmdSurfaceDMA *body = &cmd->body;
		SVGA3dCopyBox *cb = &cmd->cb;
		SVGA3dCmdSurfaceDMASuffix *suffix = &cmd->suffix;
		const struct vmw_surface_offset *cur_offset = &srf->offsets[i];
		const struct drm_vmw_size *cur_size = &srf->sizes[i];

		header->id = SVGA_3D_CMD_SURFACE_DMA;
		header->size = sizeof(*body) + sizeof(*cb) + sizeof(*suffix);

		body->guest.ptr = *ptr;
		body->guest.ptr.offset += cur_offset->bo_offset;
		body->guest.pitch = svga3dsurface_calculate_pitch(desc,
								  cur_size);
		body->host.sid = srf->res.id;
		body->host.face = cur_offset->face;
		body->host.mipmap = cur_offset->mip;
		body->transfer = ((to_surface) ? SVGA3D_WRITE_HOST_VRAM :
				  SVGA3D_READ_HOST_VRAM);
		cb->x = 0;
		cb->y = 0;
		cb->z = 0;
		cb->srcx = 0;
		cb->srcy = 0;
		cb->srcz = 0;
		cb->w = cur_size->width;
		cb->h = cur_size->height;
		cb->d = cur_size->depth;

		suffix->suffixSize = sizeof(*suffix);
		suffix->maximumOffset =
			svga3dsurface_get_image_buffer_size(desc, cur_size,
							    body->guest.pitch);
		suffix->flags.discard = 0;
		suffix->flags.unsynchronized = 0;
		suffix->flags.reserved = 0;
		++cmd;
	}
}


/**
 * vmw_hw_surface_destroy - destroy a Device surface
 *
 * @res: Pointer to a struct vmw_resource embedded in a struct
 * vmw_surface.
 *
 * Destroys the device surface associated with a struct vmw_surface, if
 * any, and adjusts accounting and resource count accordingly.
 */
static void vmw_hw_surface_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_surface *srf;
	void *cmd;

	if (res->func->destroy == vmw_gb_surface_destroy) {
		(void) vmw_gb_surface_destroy(res);
		return;
	}

	if (res->id != -1) {

		cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size());
		if (unlikely(cmd == NULL)) {
			DRM_ERROR("Failed reserving FIFO space for surface "
				  "destruction.\n");
			return;
		}

		vmw_surface_destroy_encode(res->id, cmd);
		vmw_fifo_commit(dev_priv, vmw_surface_destroy_size());

		/*
		 * TODO: Consider an atomic used_memory_size, or a separate
		 * lock, to avoid taking dev_priv::cmdbuf_mutex in
		 * the destroy path.
		 */

		mutex_lock(&dev_priv->cmdbuf_mutex);
		srf = vmw_res_to_srf(res);
		dev_priv->used_memory_size -= res->backup_size;
		mutex_unlock(&dev_priv->cmdbuf_mutex);
	}
	vmw_3d_resource_dec(dev_priv, false);
}

/**
 * vmw_legacy_srf_create - Create a device surface as part of the
 * resource validation process.
 *
 * @res: Pointer to a struct vmw_surface.
 *
 * If the surface doesn't have a hw id yet, allocates one and encodes a
 * surface define command into the FIFO.
 *
 * Returns -EBUSY if there weren't sufficient device resources to
 * complete the validation. Retry after freeing up resources.
 *
 * May return other errors if the kernel is out of guest resources.
 */
static int vmw_legacy_srf_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_surface *srf;
	uint32_t submit_size;
	uint8_t *cmd;
	int ret;

	if (likely(res->id != -1))
		return 0;

	srf = vmw_res_to_srf(res);
	if (unlikely(dev_priv->used_memory_size + res->backup_size >=
		     dev_priv->memory_size))
		return -EBUSY;

	/*
	 * Alloc id for the resource.
	 */

	ret = vmw_resource_alloc_id(res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a surface id.\n");
		goto out_no_id;
	}

	if (unlikely(res->id >= SVGA3D_MAX_SURFACE_IDS)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

	/*
	 * Encode surface define commands.
	 */

	submit_size = vmw_surface_define_size(srf);
	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "creation.\n");
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	vmw_surface_define_encode(srf, cmd);
	vmw_fifo_commit(dev_priv, submit_size);

	/*
	 * Surface memory usage accounting.
	 */

	dev_priv->used_memory_size += res->backup_size;
	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
	return ret;
}

/**
 * vmw_legacy_srf_dma - Copy backup data to or from a legacy surface.
 *
 * @res: Pointer to a struct vmw_res embedded in a struct
 * vmw_surface.
 * @val_buf: Pointer to a struct ttm_validate_buffer containing
 * information about the backup buffer.
 * @bind: Boolean whether to DMA to the surface.
 *
 * Transfer backup data to or from a legacy surface as part of the
 * validation process.
 * May return errors if the kernel is out of guest resources.
 * The backup buffer will be fenced or idle upon successful completion,
 * and if the surface needs persistent backup storage, the backup buffer
 * will also be returned reserved iff @bind is true.
 */
static int vmw_legacy_srf_dma(struct vmw_resource *res,
			      struct ttm_validate_buffer *val_buf,
			      bool bind)
{
	SVGAGuestPtr ptr;
	struct vmw_fence_obj *fence;
	uint32_t submit_size;
	struct vmw_surface *srf = vmw_res_to_srf(res);
	uint8_t *cmd;
	struct vmw_private *dev_priv = res->dev_priv;

	BUG_ON(val_buf->bo == NULL);

	submit_size = vmw_surface_dma_size(srf);
	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "DMA.\n");
		return -ENOMEM;
	}
	vmw_bo_get_guest_ptr(val_buf->bo, &ptr);
	vmw_surface_dma_encode(srf, cmd, &ptr, bind);

	vmw_fifo_commit(dev_priv, submit_size);

	/*
	 * Create a fence object and fence the backup buffer.
	 */

	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	vmw_fence_single_bo(val_buf->bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}

/**
 * vmw_legacy_srf_bind - Perform a legacy surface bind as part of the
 * surface validation process.
 *
 * @res: Pointer to a struct vmw_res embedded in a struct
 * vmw_surface.
 * @val_buf: Pointer to a struct ttm_validate_buffer containing
 * information about the backup buffer.
 *
 * This function will copy backup data to the surface if the
 * backup buffer is dirty.
 */
static int vmw_legacy_srf_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf)
{
	if (!res->backup_dirty)
		return 0;

	return vmw_legacy_srf_dma(res, val_buf, true);
}


/**
 * vmw_legacy_srf_unbind - Perform a legacy surface unbind as part of the
 * surface eviction process.
 *
 * @res: Pointer to a struct vmw_res embedded in a struct
 * vmw_surface.
 * @readback: Boolean whether the surface contents should be copied back
 * to the backup buffer.
 * @val_buf: Pointer to a struct ttm_validate_buffer containing
 * information about the backup buffer.
 *
 * This function will copy backup data from the surface if @readback
 * is true.
 */
static int vmw_legacy_srf_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf)
{
	if (unlikely(readback))
		return vmw_legacy_srf_dma(res, val_buf, false);
	return 0;
}

/**
 * vmw_legacy_srf_destroy - Destroy a device surface as part of a
 * resource eviction process.
 *
 * @res: Pointer to a struct vmw_res embedded in a struct
 * vmw_surface.
 */
static int vmw_legacy_srf_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	uint32_t submit_size;
	uint8_t *cmd;

	BUG_ON(res->id == -1);

	/*
	 * Encode the surface destroy command.
	 */

	submit_size = vmw_surface_destroy_size();
	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "eviction.\n");
		return -ENOMEM;
	}

	vmw_surface_destroy_encode(res->id, cmd);
	vmw_fifo_commit(dev_priv, submit_size);

	/*
	 * Surface memory usage accounting.
	 */

	dev_priv->used_memory_size -= res->backup_size;

	/*
	 * Release the surface ID.
	 */

	vmw_resource_release_id(res);

	return 0;
}

/**
 * vmw_surface_init - initialize a struct vmw_surface
 *
 * @dev_priv: Pointer to a device private struct.
 * @srf: Pointer to the struct vmw_surface to initialize.
 * @res_free: Pointer to a resource destructor used to free
 * the object.
 */
static int vmw_surface_init(struct vmw_private *dev_priv,
			    struct vmw_surface *srf,
			    void (*res_free) (struct vmw_resource *res))
{
	int ret;
	struct vmw_resource *res = &srf->res;

	BUG_ON(res_free == NULL);
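
	/*
	 * A guest-backed surface is only created on first validation,
	 * so the 3D resource count is bumped here for the legacy path only.
	 */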
	if (!dev_priv->has_mob)
		(void) vmw_3d_resource_inc(dev_priv, false);
	ret = vmw_resource_init(dev_priv, res, true, res_free,
				(dev_priv->has_mob) ? &vmw_gb_surface_func :
				&vmw_legacy_surface_func);

	if (unlikely(ret != 0)) {
		if (!dev_priv->has_mob)
			vmw_3d_resource_dec(dev_priv, false);
		res_free(res);
		return ret;
	}

	/*
	 * The surface won't be visible to hardware until a
	 * surface validate.
	 */

	vmw_resource_activate(res, vmw_hw_surface_destroy);
	return ret;
}

/**
 * vmw_user_surface_base_to_res - TTM base object to resource converter for
 * user visible surfaces
 *
 * @base: Pointer to a TTM base object
 *
 * Returns the struct vmw_resource embedded in a struct vmw_surface
 * for the user-visible object identified by the TTM base object @base.
 */
static struct vmw_resource *
vmw_user_surface_base_to_res(struct ttm_base_object *base)
{
	return &(container_of(base, struct vmw_user_surface,
			      prime.base)->srf.res);
}

/**
 * vmw_user_surface_free - User visible surface resource destructor
 *
 * @res: A struct vmw_resource embedded in a struct vmw_surface.
 */
static void vmw_user_surface_free(struct vmw_resource *res)
{
	struct vmw_surface *srf = vmw_res_to_srf(res);
	struct vmw_user_surface *user_srf =
		container_of(srf, struct vmw_user_surface, srf);
	struct vmw_private *dev_priv = srf->res.dev_priv;
	uint32_t size = user_srf->size;

	kfree(srf->offsets);
	kfree(srf->sizes);
	kfree(srf->snooper.image);
	ttm_prime_object_kfree(user_srf, prime);
	ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
}

/**
 * vmw_user_surface_base_release - User visible surface TTM base object
 * destructor
 *
 * @p_base: Pointer to a pointer to a TTM base object
 * embedded in a struct vmw_user_surface.
 *
 * Drops the base object's reference on its resource, and the
 * pointer pointed to by *p_base is set to NULL.
 */
static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_surface *user_srf =
	    container_of(base, struct vmw_user_surface, prime.base);
	struct vmw_resource *res = &user_srf->srf.res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

/**
 * vmw_surface_destroy_ioctl - Ioctl function implementing
 * the user surface destroy functionality.
 *
 * @dev: Pointer to a struct drm_device.
 * @data: Pointer to data copied from / to user-space.
 * @file_priv: Pointer to a drm file private structure.
 */
int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

	return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
}

/**
 * vmw_surface_define_ioctl - Ioctl function implementing
 * the user surface define functionality.
 *
 * @dev: Pointer to a struct drm_device.
 * @data: Pointer to data copied from / to user-space.
 * @file_priv: Pointer to a drm file private structure.
 */
int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_surface *user_srf;
	struct vmw_surface *srf;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	union drm_vmw_surface_create_arg *arg =
	    (union drm_vmw_surface_create_arg *)data;
	struct drm_vmw_surface_create_req *req = &arg->req;
	struct drm_vmw_surface_arg *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct drm_vmw_size __user *user_sizes;
	int ret;
	int i, j;
	uint32_t cur_bo_offset;
	struct drm_vmw_size *cur_size;
	struct vmw_surface_offset *cur_offset;
	uint32_t num_sizes;
	uint32_t size;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	const struct svga3d_surface_desc *desc;

	if (unlikely(vmw_user_surface_size == 0))
		vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
			128;

	num_sizes = 0;
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
		num_sizes += req->mip_levels[i];

	if (num_sizes > DRM_VMW_MAX_SURFACE_FACES *
	    DRM_VMW_MAX_MIP_LEVELS)
		return -EINVAL;

	size = vmw_user_surface_size + 128 +
		ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) +
		ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset));

	desc = svga3dsurface_get_desc(req->format);
	if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
		DRM_ERROR("Invalid surface format for surface creation.\n");
		return -EINVAL;
	}

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   size, false, true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for surface"
				  " creation.\n");
		goto out_unlock;
	}

	user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
	if (unlikely(user_srf == NULL)) {
		ret = -ENOMEM;
		goto out_no_user_srf;
	}

	srf = &user_srf->srf;
	res = &srf->res;

	srf->flags = req->flags;
	srf->format = req->format;
	srf->scanout = req->scanout;

	memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
	srf->num_sizes = num_sizes;
	user_srf->size = size;

	srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
	if (unlikely(srf->sizes == NULL)) {
		ret = -ENOMEM;
		goto out_no_sizes;
	}
	srf->offsets = kmalloc(srf->num_sizes * sizeof(*srf->offsets),
			       GFP_KERNEL);
	if (unlikely(srf->offsets == NULL)) {
		ret = -ENOMEM;
		goto out_no_offsets;
	}

	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
	    req->size_addr;

	ret = copy_from_user(srf->sizes, user_sizes,
			     srf->num_sizes * sizeof(*srf->sizes));
	if (unlikely(ret != 0)) {
		ret = -EFAULT;
		goto out_no_copy;
	}

	srf->base_size = *srf->sizes;
	srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
	srf->multisample_count = 0;

	cur_bo_offset = 0;
	cur_offset = srf->offsets;
	cur_size = srf->sizes;

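	/*
	 * Record each mip image's offset into the backing store, walking
	 * faces and mip levels in the order the DMA commands will reference
	 * them, and accumulate the total backing store size.
	 */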
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
		for (j = 0; j < srf->mip_levels[i]; ++j) {
			uint32_t stride = svga3dsurface_calculate_pitch
				(desc, cur_size);

			cur_offset->face = i;
			cur_offset->mip = j;
			cur_offset->bo_offset = cur_bo_offset;
			cur_bo_offset += svga3dsurface_get_image_buffer_size
				(desc, cur_size, stride);
			++cur_offset;
			++cur_size;
		}
	}
	res->backup_size = cur_bo_offset;
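
	/*
	 * A 64x64 A8R8G8B8 scanout surface may be used as a hardware
	 * cursor; allocate a snooper image so that cursor contents DMA'd
	 * to the surface can be mirrored by the kernel.
	 */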
	if (srf->scanout &&
	    srf->num_sizes == 1 &&
	    srf->sizes[0].width == 64 &&
	    srf->sizes[0].height == 64 &&
	    srf->format == SVGA3D_A8R8G8B8) {

		srf->snooper.image = kmalloc(64 * 64 * 4, GFP_KERNEL);
		/* clear the image */
		if (srf->snooper.image) {
			memset(srf->snooper.image, 0x00, 64 * 64 * 4);
		} else {
			DRM_ERROR("Failed to allocate cursor_image\n");
			ret = -ENOMEM;
			goto out_no_copy;
		}
	} else {
		srf->snooper.image = NULL;
	}
	srf->snooper.crtc = NULL;

	user_srf->prime.base.shareable = false;
	user_srf->prime.base.tfile = NULL;

	/*
	 * From this point, the generic resource management functions
	 * destroy the object on failure.
	 */

	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
	if (unlikely(ret != 0))
		goto out_unlock;

	tmp = vmw_resource_reference(&srf->res);
	ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
				    req->shareable, VMW_RES_SURFACE,
				    &vmw_user_surface_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		vmw_resource_unreference(&res);
		goto out_unlock;
	}

	rep->sid = user_srf->prime.base.hash.key;
	vmw_resource_unreference(&res);

	ttm_read_unlock(&vmaster->lock);
	return 0;
out_no_copy:
	kfree(srf->offsets);
out_no_offsets:
	kfree(srf->sizes);
out_no_sizes:
	ttm_prime_object_kfree(user_srf, prime);
out_no_user_srf:
	ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
out_unlock:
	ttm_read_unlock(&vmaster->lock);
	return ret;
}

/**
 * vmw_surface_reference_ioctl - Ioctl function implementing
 * the user surface reference functionality.
 *
 * @dev: Pointer to a struct drm_device.
 * @data: Pointer to data copied from / to user-space.
 * @file_priv: Pointer to a drm file private structure.
 */
int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	union drm_vmw_surface_reference_arg *arg =
	    (union drm_vmw_surface_reference_arg *)data;
	struct drm_vmw_surface_arg *req = &arg->req;
	struct drm_vmw_surface_create_req *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;
	struct drm_vmw_size __user *user_sizes;
	struct ttm_base_object *base;
	int ret = -EINVAL;

	base = ttm_base_object_lookup_for_ref(dev_priv->tdev, req->sid);
	if (unlikely(base == NULL)) {
		DRM_ERROR("Could not find surface to reference.\n");
		return -EINVAL;
	}

	if (unlikely(ttm_base_object_type(base) != VMW_RES_SURFACE))
		goto out_bad_resource;

	user_srf = container_of(base, struct vmw_user_surface, prime.base);
	srf = &user_srf->srf;

	ret = ttm_ref_object_add(tfile, &user_srf->prime.base,
				 TTM_REF_USAGE, NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not add a reference to a surface.\n");
		goto out_no_reference;
	}

	rep->flags = srf->flags;
	rep->format = srf->format;
	memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
	    rep->size_addr;

	if (user_sizes)
		ret = copy_to_user(user_sizes, srf->sizes,
				   srf->num_sizes * sizeof(*srf->sizes));
	if (unlikely(ret != 0)) {
		DRM_ERROR("copy_to_user failed %p %u\n",
			  user_sizes, srf->num_sizes);
		ret = -EFAULT;
	}
out_bad_resource:
out_no_reference:
	ttm_base_object_unref(&base);

	return ret;
}

/**
 * vmw_gb_surface_create - Create a guest-backed device surface as part
 * of the resource validation process.
 *
 * @res: Pointer to a struct vmw_resource embedded in a struct
 * vmw_surface.
 *
 * If the surface doesn't have a hw id yet, allocates one and submits a
 * surface define command to the device.
 */
static int vmw_gb_surface_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_surface *srf = vmw_res_to_srf(res);
	uint32_t cmd_len, submit_len;
	int ret;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBSurface body;
	} *cmd;

	if (likely(res->id != -1))
		return 0;

	(void) vmw_3d_resource_inc(dev_priv, false);
	ret = vmw_resource_alloc_id(res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a surface id.\n");
		goto out_no_id;
	}

	if (unlikely(res->id >= VMWGFX_NUM_GB_SURFACE)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

	cmd_len = sizeof(cmd->body);
	submit_len = sizeof(*cmd);
	cmd = vmw_fifo_reserve(dev_priv, submit_len);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "creation.\n");
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	cmd->header.id = SVGA_3D_CMD_DEFINE_GB_SURFACE;
	cmd->header.size = cmd_len;
	cmd->body.sid = srf->res.id;
	cmd->body.surfaceFlags = srf->flags;
	cmd->body.format = cpu_to_le32(srf->format);
	cmd->body.numMipLevels = srf->mip_levels[0];
	cmd->body.multisampleCount = srf->multisample_count;
	cmd->body.autogenFilter = srf->autogen_filter;
	cmd->body.size.width = srf->base_size.width;
	cmd->body.size.height = srf->base_size.height;
	cmd->body.size.depth = srf->base_size.depth;
	vmw_fifo_commit(dev_priv, submit_len);

	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
	vmw_3d_resource_dec(dev_priv, false);
	return ret;
}

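/**
 * vmw_gb_surface_bind - Bind a guest-backed surface to its backup mob
 * as part of the resource validation process.
 *
 * @res: Pointer to a struct vmw_resource embedded in a struct
 * vmw_surface.
 * @val_buf: Pointer to a struct ttm_validate_buffer containing
 * information about the backup buffer, which must be a mob.
 *
 * Also submits an update command if the backup buffer carries dirty data.
 */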
static int vmw_gb_surface_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBSurface body;
	} *cmd1;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdUpdateGBSurface body;
	} *cmd2;
	uint32_t submit_size;
	struct ttm_buffer_object *bo = val_buf->bo;

	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

	submit_size = sizeof(*cmd1) + (res->backup_dirty ? sizeof(*cmd2) : 0);

	cmd1 = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd1 == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "binding.\n");
		return -ENOMEM;
	}

	cmd1->header.id = SVGA_3D_CMD_BIND_GB_SURFACE;
	cmd1->header.size = sizeof(cmd1->body);
	cmd1->body.sid = res->id;
	cmd1->body.mobid = bo->mem.start;
	if (res->backup_dirty) {
		cmd2 = (void *) &cmd1[1];
		cmd2->header.id = SVGA_3D_CMD_UPDATE_GB_SURFACE;
		cmd2->header.size = sizeof(cmd2->body);
		cmd2->body.sid = res->id;
		res->backup_dirty = false;
	}
	vmw_fifo_commit(dev_priv, submit_size);

	return 0;
}

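/**
 * vmw_gb_surface_unbind - Unbind a guest-backed surface from its backup
 * mob as part of the resource eviction process.
 *
 * @res: Pointer to a struct vmw_resource embedded in a struct
 * vmw_surface.
 * @readback: Boolean whether the surface contents should be read back
 * into the backup buffer before the mob is detached.
 * @val_buf: Pointer to a struct ttm_validate_buffer containing
 * information about the backup buffer.
 */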
static int vmw_gb_surface_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct ttm_buffer_object *bo = val_buf->bo;
	struct vmw_fence_obj *fence;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBSurface body;
	} *cmd1;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdInvalidateGBSurface body;
	} *cmd2;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBSurface body;
	} *cmd3;
	uint32_t submit_size;
	uint8_t *cmd;

	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

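	/*
	 * A readback unbind emits a READBACK_GB_SURFACE command, while an
	 * eviction without readback emits INVALIDATE_GB_SURFACE. Either is
	 * followed by a BIND_GB_SURFACE to SVGA3D_INVALID_ID, detaching the
	 * surface from its mob.
	 */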
	submit_size = sizeof(*cmd3) + (readback ? sizeof(*cmd1) : sizeof(*cmd2));
	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "unbinding.\n");
		return -ENOMEM;
	}

	if (readback) {
		cmd1 = (void *) cmd;
		cmd1->header.id = SVGA_3D_CMD_READBACK_GB_SURFACE;
		cmd1->header.size = sizeof(cmd1->body);
		cmd1->body.sid = res->id;
		cmd3 = (void *) &cmd1[1];
	} else {
		cmd2 = (void *) cmd;
		cmd2->header.id = SVGA_3D_CMD_INVALIDATE_GB_SURFACE;
		cmd2->header.size = sizeof(cmd2->body);
		cmd2->body.sid = res->id;
		cmd3 = (void *) &cmd2[1];
	}

	cmd3->header.id = SVGA_3D_CMD_BIND_GB_SURFACE;
	cmd3->header.size = sizeof(cmd3->body);
	cmd3->body.sid = res->id;
	cmd3->body.mobid = SVGA3D_INVALID_ID;

	vmw_fifo_commit(dev_priv, submit_size);

	/*
	 * Create a fence object and fence the backup buffer.
	 */

	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	vmw_fence_single_bo(val_buf->bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}

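/**
 * vmw_gb_surface_destroy - Destroy a guest-backed device surface.
 *
 * @res: Pointer to a struct vmw_resource embedded in a struct
 * vmw_surface.
 *
 * Kills any context bindings referencing the surface before submitting
 * a surface destroy command to the device.
 */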
static int vmw_gb_surface_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyGBSurface body;
	} *cmd;

	if (likely(res->id == -1))
		return 0;

	mutex_lock(&dev_priv->binding_mutex);
	vmw_context_binding_res_list_kill(&res->binding_head);

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "destruction.\n");
		mutex_unlock(&dev_priv->binding_mutex);
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SURFACE;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.sid = res->id;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	mutex_unlock(&dev_priv->binding_mutex);
	vmw_resource_release_id(res);
	vmw_3d_resource_dec(dev_priv, false);

	return 0;
}

/**
 * vmw_gb_surface_define_ioctl - Ioctl function implementing
 * the user surface define functionality.
 *
 * @dev: Pointer to a struct drm_device.
 * @data: Pointer to data copied from / to user-space.
 * @file_priv: Pointer to a drm file private structure.
 */
int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_surface *user_srf;
	struct vmw_surface *srf;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	union drm_vmw_gb_surface_create_arg *arg =
	    (union drm_vmw_gb_surface_create_arg *)data;
	struct drm_vmw_gb_surface_create_req *req = &arg->req;
	struct drm_vmw_gb_surface_create_rep *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;
	uint32_t size;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	const struct svga3d_surface_desc *desc;
	uint32_t backup_handle;

	if (unlikely(vmw_user_surface_size == 0))
		vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
			128;

	size = vmw_user_surface_size + 128;

	desc = svga3dsurface_get_desc(req->format);
	if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
		DRM_ERROR("Invalid surface format for surface creation.\n");
		return -EINVAL;
	}

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   size, false, true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for surface"
				  " creation.\n");
		goto out_unlock;
	}

	user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
	if (unlikely(user_srf == NULL)) {
		ret = -ENOMEM;
		goto out_no_user_srf;
	}

	srf = &user_srf->srf;
	res = &srf->res;

	srf->flags = req->svga3d_flags;
	srf->format = req->format;
	srf->scanout = req->drm_surface_flags & drm_vmw_surface_flag_scanout;
	srf->mip_levels[0] = req->mip_levels;
	srf->num_sizes = 1;
	srf->sizes = NULL;
	srf->offsets = NULL;
	user_srf->size = size;
	srf->base_size = req->base_size;
	srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
	srf->multisample_count = req->multisample_count;
	res->backup_size = svga3dsurface_get_serialized_size
		(srf->format, srf->base_size, srf->mip_levels[0],
		 srf->flags & SVGA3D_SURFACE_CUBEMAP);

	user_srf->prime.base.shareable = false;
	user_srf->prime.base.tfile = NULL;

	/*
	 * From this point, the generic resource management functions
	 * destroy the object on failure.
	 */

	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
	if (unlikely(ret != 0))
		goto out_unlock;

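	/*
	 * A user-supplied buffer handle takes precedence as the backup
	 * buffer; otherwise a new backup buffer may be allocated on the
	 * user's behalf and its handle returned.
	 */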
	if (req->buffer_handle != SVGA3D_INVALID_ID) {
		ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle,
					     &res->backup);
	} else if (req->drm_surface_flags &
		   drm_vmw_surface_flag_create_buffer)
		ret = vmw_user_dmabuf_alloc(dev_priv, tfile,
					    res->backup_size,
					    req->drm_surface_flags &
					    drm_vmw_surface_flag_shareable,
					    &backup_handle,
					    &res->backup);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&res);
		goto out_unlock;
	}

	tmp = vmw_resource_reference(&srf->res);
	ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
				    req->drm_surface_flags &
				    drm_vmw_surface_flag_shareable,
				    VMW_RES_SURFACE,
				    &vmw_user_surface_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		vmw_resource_unreference(&res);
		goto out_unlock;
	}

	rep->handle = user_srf->prime.base.hash.key;
	rep->backup_size = res->backup_size;
	if (res->backup) {
		rep->buffer_map_handle =
			drm_vma_node_offset_addr(&res->backup->base.vma_node);
		rep->buffer_size = res->backup->base.num_pages * PAGE_SIZE;
		rep->buffer_handle = backup_handle;
	} else {
		rep->buffer_map_handle = 0;
		rep->buffer_size = 0;
		rep->buffer_handle = SVGA3D_INVALID_ID;
	}

	vmw_resource_unreference(&res);

	ttm_read_unlock(&vmaster->lock);
	return 0;
out_no_user_srf:
	ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
out_unlock:
	ttm_read_unlock(&vmaster->lock);
	return ret;
}

/**
 * vmw_gb_surface_reference_ioctl - Ioctl function implementing
 * the user surface reference functionality.
 *
 * @dev: Pointer to a struct drm_device.
 * @data: Pointer to data copied from / to user-space.
 * @file_priv: Pointer to a drm file private structure.
 */
int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	union drm_vmw_gb_surface_reference_arg *arg =
	    (union drm_vmw_gb_surface_reference_arg *)data;
	struct drm_vmw_surface_arg *req = &arg->req;
	struct drm_vmw_gb_surface_ref_rep *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;
	struct ttm_base_object *base;
	uint32_t backup_handle;
	int ret = -EINVAL;

	base = ttm_base_object_lookup_for_ref(dev_priv->tdev, req->sid);
	if (unlikely(base == NULL)) {
		DRM_ERROR("Could not find surface to reference.\n");
		return -EINVAL;
	}

	if (unlikely(ttm_base_object_type(base) != VMW_RES_SURFACE))
		goto out_bad_resource;

	user_srf = container_of(base, struct vmw_user_surface, prime.base);
	srf = &user_srf->srf;
	if (srf->res.backup == NULL) {
		DRM_ERROR("Shared GB surface is missing a backup buffer.\n");
		goto out_bad_resource;
	}

	ret = ttm_ref_object_add(tfile, &user_srf->prime.base,
				 TTM_REF_USAGE, NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not add a reference to a GB surface.\n");
		goto out_bad_resource;
	}

	mutex_lock(&dev_priv->cmdbuf_mutex); /* Protect res->backup */
	ret = vmw_user_dmabuf_reference(tfile, srf->res.backup,
					&backup_handle);
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not add a reference to a GB surface "
			  "backup buffer.\n");
		(void) ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
						 req->sid,
						 TTM_REF_USAGE);
		goto out_bad_resource;
	}

	rep->creq.svga3d_flags = srf->flags;
	rep->creq.format = srf->format;
	rep->creq.mip_levels = srf->mip_levels[0];
	rep->creq.drm_surface_flags = 0;
	rep->creq.multisample_count = srf->multisample_count;
	rep->creq.autogen_filter = srf->autogen_filter;
	rep->creq.buffer_handle = backup_handle;
	rep->creq.base_size = srf->base_size;
	rep->crep.handle = user_srf->prime.base.hash.key;
	rep->crep.backup_size = srf->res.backup_size;
	rep->crep.buffer_handle = backup_handle;
	rep->crep.buffer_map_handle =
		drm_vma_node_offset_addr(&srf->res.backup->base.vma_node);
	rep->crep.buffer_size = srf->res.backup->base.num_pages * PAGE_SIZE;

out_bad_resource:
	ttm_base_object_unref(&base);

	return ret;
}