/**************************************************************************
 *
 * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include <ttm/ttm_placement.h>

/**
 * struct vmw_user_surface - User-space visible surface resource
 *
 * @base: The TTM base object handling user-space visibility.
 * @srf: The surface metadata.
 * @size: TTM accounting size for the surface.
 * @backup_handle: User-space handle of the surface backup buffer, if any.
 */
struct vmw_user_surface {
	struct ttm_base_object base;
	struct vmw_surface srf;
	uint32_t size;
	uint32_t backup_handle;
};

/**
 * struct vmw_surface_offset - Backing store mip level offset info
 *
 * @face: Surface face.
 * @mip: Mip level.
 * @bo_offset: Offset into backing store of this mip level.
 */
struct vmw_surface_offset {
	uint32_t face;
	uint32_t mip;
	uint32_t bo_offset;
};

static void vmw_user_surface_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_surface_base_to_res(struct ttm_base_object *base);
static int vmw_legacy_srf_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf);
static int vmw_legacy_srf_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf);
static int vmw_legacy_srf_create(struct vmw_resource *res);
static int vmw_legacy_srf_destroy(struct vmw_resource *res);

static const struct vmw_user_resource_conv user_surface_conv = {
	.object_type = VMW_RES_SURFACE,
	.base_obj_to_res = vmw_user_surface_base_to_res,
	.res_free = vmw_user_surface_free
};

const struct vmw_user_resource_conv *user_surface_converter =
	&user_surface_conv;


static uint64_t vmw_user_surface_size;

static const struct vmw_res_func vmw_legacy_surface_func = {
	.res_type = vmw_res_surface,
	.needs_backup = false,
	.may_evict = true,
	.type_name = "legacy surfaces",
	.backup_placement = &vmw_srf_placement,
	.create = &vmw_legacy_srf_create,
	.destroy = &vmw_legacy_srf_destroy,
	.bind = &vmw_legacy_srf_bind,
	.unbind = &vmw_legacy_srf_unbind
};

/**
 * struct vmw_bpp - Bits per pixel info for surface storage size computation.
 *
 * @bpp: Bits per pixel.
 * @s_bpp: Stride bits per pixel. See definition below.
 */
struct vmw_bpp {
	uint8_t bpp;
	uint8_t s_bpp;
};

/*
 * Size table for the supported SVGA3D surface formats. Each entry holds
 * two values: the bpp value and the s_bpp value, which is short for
 * "stride bits per pixel". The values are given in such a way that the
 * minimum stride for the image is calculated using
 *
 * min_stride = w*s_bpp
 *
 * and the total memory requirement for the image is
 *
 * h*min_stride*bpp/s_bpp
 */
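/*
 * Worked example (a sketch based on the SVGA3D_NV12 entry below): with
 * bpp = 12 and s_bpp = 8, a 64x64 NV12 image has a minimum stride of
 * 64*8 = 512 bits (64 bytes), and a total memory requirement of
 * 64*512*12/8 = 49152 bits = 6144 bytes, i.e. 64x64 bytes of luma plus
 * half that again for the subsampled chroma plane.
 */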
static const struct vmw_bpp vmw_sf_bpp[] = {
	[SVGA3D_FORMAT_INVALID] = {0, 0},
	[SVGA3D_X8R8G8B8] = {32, 32},
	[SVGA3D_A8R8G8B8] = {32, 32},
	[SVGA3D_R5G6B5] = {16, 16},
	[SVGA3D_X1R5G5B5] = {16, 16},
	[SVGA3D_A1R5G5B5] = {16, 16},
	[SVGA3D_A4R4G4B4] = {16, 16},
	[SVGA3D_Z_D32] = {32, 32},
	[SVGA3D_Z_D16] = {16, 16},
	[SVGA3D_Z_D24S8] = {32, 32},
	[SVGA3D_Z_D15S1] = {16, 16},
	[SVGA3D_LUMINANCE8] = {8, 8},
	[SVGA3D_LUMINANCE4_ALPHA4] = {8, 8},
	[SVGA3D_LUMINANCE16] = {16, 16},
	[SVGA3D_LUMINANCE8_ALPHA8] = {16, 16},
	[SVGA3D_DXT1] = {4, 16},
	[SVGA3D_DXT2] = {8, 32},
	[SVGA3D_DXT3] = {8, 32},
	[SVGA3D_DXT4] = {8, 32},
	[SVGA3D_DXT5] = {8, 32},
	[SVGA3D_BUMPU8V8] = {16, 16},
	[SVGA3D_BUMPL6V5U5] = {16, 16},
	[SVGA3D_BUMPX8L8V8U8] = {32, 32},
	[SVGA3D_ARGB_S10E5] = {16, 16},
	[SVGA3D_ARGB_S23E8] = {32, 32},
	[SVGA3D_A2R10G10B10] = {32, 32},
	[SVGA3D_V8U8] = {16, 16},
	[SVGA3D_Q8W8V8U8] = {32, 32},
	[SVGA3D_CxV8U8] = {16, 16},
	[SVGA3D_X8L8V8U8] = {32, 32},
	[SVGA3D_A2W10V10U10] = {32, 32},
	[SVGA3D_ALPHA8] = {8, 8},
	[SVGA3D_R_S10E5] = {16, 16},
	[SVGA3D_R_S23E8] = {32, 32},
	[SVGA3D_RG_S10E5] = {16, 16},
	[SVGA3D_RG_S23E8] = {32, 32},
	[SVGA3D_BUFFER] = {8, 8},
	[SVGA3D_Z_D24X8] = {32, 32},
	[SVGA3D_V16U16] = {32, 32},
	[SVGA3D_G16R16] = {32, 32},
	[SVGA3D_A16B16G16R16] = {64, 64},
	[SVGA3D_UYVY] = {12, 12},
	[SVGA3D_YUY2] = {12, 12},
	[SVGA3D_NV12] = {12, 8},
	[SVGA3D_AYUV] = {32, 32},
	[SVGA3D_BC4_UNORM] = {4, 16},
	[SVGA3D_BC5_UNORM] = {8, 32},
	[SVGA3D_Z_DF16] = {16, 16},
	[SVGA3D_Z_DF24] = {24, 24},
	[SVGA3D_Z_D24S8_INT] = {32, 32}
};


/**
 * struct vmw_surface_dma - SVGA3D DMA command
 */
struct vmw_surface_dma {
	SVGA3dCmdHeader header;
	SVGA3dCmdSurfaceDMA body;
	SVGA3dCopyBox cb;
	SVGA3dCmdSurfaceDMASuffix suffix;
};

/**
 * struct vmw_surface_define - SVGA3D Surface Define command
 */
struct vmw_surface_define {
	SVGA3dCmdHeader header;
	SVGA3dCmdDefineSurface body;
};

/**
 * struct vmw_surface_destroy - SVGA3D Surface Destroy command
 */
struct vmw_surface_destroy {
	SVGA3dCmdHeader header;
	SVGA3dCmdDestroySurface body;
};


/**
 * vmw_surface_dma_size - Compute fifo size for a dma command.
 *
 * @srf: Pointer to a struct vmw_surface
 *
 * Computes the required size for a surface dma command for backup or
 * restoration of the surface represented by @srf.
 */
static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf)
{
	return srf->num_sizes * sizeof(struct vmw_surface_dma);
}


/**
 * vmw_surface_define_size - Compute fifo size for a surface define command.
 *
 * @srf: Pointer to a struct vmw_surface
 *
 * Computes the required size for a surface define command for the definition
 * of the surface represented by @srf.
 */
static inline uint32_t vmw_surface_define_size(const struct vmw_surface *srf)
{
	return sizeof(struct vmw_surface_define) + srf->num_sizes *
		sizeof(SVGA3dSize);
}


/**
 * vmw_surface_destroy_size - Compute fifo size for a surface destroy command.
 *
 * Computes the required size for a surface destroy command for the destruction
 * of a hw surface.
 */
static inline uint32_t vmw_surface_destroy_size(void)
{
	return sizeof(struct vmw_surface_destroy);
}

/**
 * vmw_surface_destroy_encode - Encode a surface_destroy command.
 *
 * @id: The surface id
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
 */
static void vmw_surface_destroy_encode(uint32_t id,
				       void *cmd_space)
{
	struct vmw_surface_destroy *cmd = (struct vmw_surface_destroy *)
		cmd_space;

	cmd->header.id = SVGA_3D_CMD_SURFACE_DESTROY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.sid = id;
}

/**
 * vmw_surface_define_encode - Encode a surface_define command.
 *
 * @srf: Pointer to a struct vmw_surface object.
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
 */
static void vmw_surface_define_encode(const struct vmw_surface *srf,
				      void *cmd_space)
{
	struct vmw_surface_define *cmd = (struct vmw_surface_define *)
		cmd_space;
	struct drm_vmw_size *src_size;
	SVGA3dSize *cmd_size;
	uint32_t cmd_len;
	int i;

	cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);

	cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE;
	cmd->header.size = cmd_len;
	cmd->body.sid = srf->res.id;
	cmd->body.surfaceFlags = srf->flags;
	cmd->body.format = cpu_to_le32(srf->format);
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
		cmd->body.face[i].numMipLevels = srf->mip_levels[i];

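	/*
	 * The command is variable-length: an SVGA3dSize array with one
	 * entry per mip level follows directly after the fixed-size body,
	 * which is what stepping the command pointer past the struct
	 * achieves here.
	 */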
	cmd += 1;
	cmd_size = (SVGA3dSize *) cmd;
	src_size = srf->sizes;

	for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
		cmd_size->width = src_size->width;
		cmd_size->height = src_size->height;
		cmd_size->depth = src_size->depth;
	}
}

/**
 * vmw_surface_dma_encode - Encode a surface_dma command.
 *
 * @srf: Pointer to a struct vmw_surface object.
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
 * @ptr: Pointer to an SVGAGuestPtr indicating where the surface contents
 * should be placed or read from.
 * @to_surface: Boolean whether to DMA to the surface or from the surface.
 */
static void vmw_surface_dma_encode(struct vmw_surface *srf,
				   void *cmd_space,
				   const SVGAGuestPtr *ptr,
				   bool to_surface)
{
	uint32_t i;
	uint32_t bpp = vmw_sf_bpp[srf->format].bpp;
	uint32_t stride_bpp = vmw_sf_bpp[srf->format].s_bpp;
	struct vmw_surface_dma *cmd = (struct vmw_surface_dma *)cmd_space;

	for (i = 0; i < srf->num_sizes; ++i) {
		SVGA3dCmdHeader *header = &cmd->header;
		SVGA3dCmdSurfaceDMA *body = &cmd->body;
		SVGA3dCopyBox *cb = &cmd->cb;
		SVGA3dCmdSurfaceDMASuffix *suffix = &cmd->suffix;
		const struct vmw_surface_offset *cur_offset = &srf->offsets[i];
		const struct drm_vmw_size *cur_size = &srf->sizes[i];

		header->id = SVGA_3D_CMD_SURFACE_DMA;
		header->size = sizeof(*body) + sizeof(*cb) + sizeof(*suffix);

		body->guest.ptr = *ptr;
		body->guest.ptr.offset += cur_offset->bo_offset;
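		/* Minimum stride w*s_bpp in bits, rounded up to whole bytes. */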
		body->guest.pitch = (cur_size->width * stride_bpp + 7) >> 3;
		body->host.sid = srf->res.id;
		body->host.face = cur_offset->face;
		body->host.mipmap = cur_offset->mip;
		body->transfer = ((to_surface) ? SVGA3D_WRITE_HOST_VRAM :
				  SVGA3D_READ_HOST_VRAM);
		cb->x = 0;
		cb->y = 0;
		cb->z = 0;
		cb->srcx = 0;
		cb->srcy = 0;
		cb->srcz = 0;
		cb->w = cur_size->width;
		cb->h = cur_size->height;
		cb->d = cur_size->depth;

		suffix->suffixSize = sizeof(*suffix);
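		/*
		 * The computed value is the size in bytes of this mip
		 * level in the backing store, bounding the guest memory
		 * the DMA may touch. The bpp/stride_bpp factor accounts
		 * for formats such as NV12 where the two differ.
		 */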
		suffix->maximumOffset = body->guest.pitch*cur_size->height*
			cur_size->depth*bpp / stride_bpp;
		suffix->flags.discard = 0;
		suffix->flags.unsynchronized = 0;
		suffix->flags.reserved = 0;
		++cmd;
	}
}


/**
 * vmw_hw_surface_destroy - destroy a device surface
 *
 * @res: Pointer to a struct vmw_resource embedded in a struct
 * vmw_surface.
 *
 * Destroys the device surface associated with a struct vmw_surface if
 * any, and adjusts accounting and resource count accordingly.
 */
static void vmw_hw_surface_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	void *cmd;

	if (res->id != -1) {
		cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size());
		if (unlikely(cmd == NULL)) {
			DRM_ERROR("Failed reserving FIFO space for surface "
				  "destruction.\n");
			return;
		}

		vmw_surface_destroy_encode(res->id, cmd);
		vmw_fifo_commit(dev_priv, vmw_surface_destroy_size());

		/*
		 * TODO: Use used_memory_size_atomic, or a separate lock,
		 * to avoid taking dev_priv::cmdbuf_mutex in
		 * the destroy path.
		 */

		mutex_lock(&dev_priv->cmdbuf_mutex);
		dev_priv->used_memory_size -= res->backup_size;
		mutex_unlock(&dev_priv->cmdbuf_mutex);
	}
	vmw_3d_resource_dec(dev_priv, false);
}

/**
 * vmw_legacy_srf_create - Create a device surface as part of the
 * resource validation process.
 *
 * @res: Pointer to a struct vmw_surface.
 *
 * If the surface doesn't have a hw id yet, allocate one and encode a
 * surface define command in the FIFO.
 *
 * Returns -EBUSY if there weren't sufficient device resources to
 * complete the validation. Retry after freeing up resources.
 *
 * May return other errors if the kernel is out of guest resources.
 */
static int vmw_legacy_srf_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_surface *srf;
	uint32_t submit_size;
	uint8_t *cmd;
	int ret;

	if (likely(res->id != -1))
		return 0;

	srf = vmw_res_to_srf(res);
	if (unlikely(dev_priv->used_memory_size + res->backup_size >=
		     dev_priv->memory_size))
		return -EBUSY;

	/*
	 * Alloc id for the resource.
	 */

	ret = vmw_resource_alloc_id(res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a surface id.\n");
		goto out_no_id;
	}

	if (unlikely(res->id >= SVGA3D_MAX_SURFACE_IDS)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

	/*
	 * Encode surface define commands.
	 */

	submit_size = vmw_surface_define_size(srf);
	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "creation.\n");
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	vmw_surface_define_encode(srf, cmd);
	vmw_fifo_commit(dev_priv, submit_size);

	/*
	 * Surface memory usage accounting.
	 */

	dev_priv->used_memory_size += res->backup_size;
	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
	return ret;
}

/**
 * vmw_legacy_srf_dma - Copy backup data to or from a legacy surface.
 *
 * @res: Pointer to a struct vmw_resource embedded in a struct
 * vmw_surface.
 * @val_buf: Pointer to a struct ttm_validate_buffer containing
 * information about the backup buffer.
 * @bind: Boolean whether to DMA to the surface.
 *
 * Transfer backup data to or from a legacy surface as part of the
 * validation process.
 * May return other errors if the kernel is out of guest resources.
 * The backup buffer will be fenced or idle upon successful completion,
 * and if the surface needs persistent backup storage, the backup buffer
 * will also be returned reserved iff @bind is true.
 */
static int vmw_legacy_srf_dma(struct vmw_resource *res,
			      struct ttm_validate_buffer *val_buf,
			      bool bind)
{
	SVGAGuestPtr ptr;
	struct vmw_fence_obj *fence;
	uint32_t submit_size;
	struct vmw_surface *srf = vmw_res_to_srf(res);
	uint8_t *cmd;
	struct vmw_private *dev_priv = res->dev_priv;

	BUG_ON(val_buf->bo == NULL);

	submit_size = vmw_surface_dma_size(srf);
	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "DMA.\n");
		return -ENOMEM;
	}
	vmw_bo_get_guest_ptr(val_buf->bo, &ptr);
	vmw_surface_dma_encode(srf, cmd, &ptr, bind);

	vmw_fifo_commit(dev_priv, submit_size);

	/*
	 * Create a fence object and fence the backup buffer.
	 */

	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	vmw_fence_single_bo(val_buf->bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}

/**
 * vmw_legacy_srf_bind - Perform a legacy surface bind as part of the
 * surface validation process.
 *
 * @res: Pointer to a struct vmw_resource embedded in a struct
 * vmw_surface.
 * @val_buf: Pointer to a struct ttm_validate_buffer containing
 * information about the backup buffer.
 *
 * This function will copy backup data to the surface if the
 * backup buffer is dirty.
 */
static int vmw_legacy_srf_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf)
{
	if (!res->backup_dirty)
		return 0;

	return vmw_legacy_srf_dma(res, val_buf, true);
}


/**
 * vmw_legacy_srf_unbind - Perform a legacy surface unbind as part of the
 * surface eviction process.
 *
 * @res: Pointer to a struct vmw_resource embedded in a struct
 * vmw_surface.
 * @readback: Boolean whether to copy backup data back from the surface.
 * @val_buf: Pointer to a struct ttm_validate_buffer containing
 * information about the backup buffer.
 *
 * This function will copy backup data from the surface.
 */
static int vmw_legacy_srf_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf)
{
	if (unlikely(readback))
		return vmw_legacy_srf_dma(res, val_buf, false);
	return 0;
}

/**
 * vmw_legacy_srf_destroy - Destroy a device surface as part of a
 * resource eviction process.
 *
 * @res: Pointer to a struct vmw_resource embedded in a struct
 * vmw_surface.
 */
static int vmw_legacy_srf_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	uint32_t submit_size;
	uint8_t *cmd;

	BUG_ON(res->id == -1);

	/*
	 * Encode the surface destroy command.
	 */

	submit_size = vmw_surface_destroy_size();
	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "eviction.\n");
		return -ENOMEM;
	}

	vmw_surface_destroy_encode(res->id, cmd);
	vmw_fifo_commit(dev_priv, submit_size);

	/*
	 * Surface memory usage accounting.
	 */

	dev_priv->used_memory_size -= res->backup_size;

	/*
	 * Release the surface ID.
	 */

	vmw_resource_release_id(res);

	return 0;
}


/**
 * vmw_surface_init - initialize a struct vmw_surface
 *
 * @dev_priv: Pointer to a device private struct.
 * @srf: Pointer to the struct vmw_surface to initialize.
 * @res_free: Pointer to a resource destructor used to free
 * the object.
 */
static int vmw_surface_init(struct vmw_private *dev_priv,
			    struct vmw_surface *srf,
			    void (*res_free) (struct vmw_resource *res))
{
	int ret;
	struct vmw_resource *res = &srf->res;

	BUG_ON(res_free == NULL);
	(void) vmw_3d_resource_inc(dev_priv, false);
	ret = vmw_resource_init(dev_priv, res, true, res_free,
				&vmw_legacy_surface_func);

	if (unlikely(ret != 0)) {
		vmw_3d_resource_dec(dev_priv, false);
		res_free(res);
		return ret;
	}

	/*
	 * The surface won't be visible to hardware until a
	 * surface validation.
	 */

	vmw_resource_activate(res, vmw_hw_surface_destroy);
	return ret;
}

/**
 * vmw_user_surface_base_to_res - TTM base object to resource converter for
 * user visible surfaces
 *
 * @base: Pointer to a TTM base object
 *
 * Returns the struct vmw_resource embedded in a struct vmw_surface
 * for the user-visible object identified by the TTM base object @base.
 */
static struct vmw_resource *
vmw_user_surface_base_to_res(struct ttm_base_object *base)
{
	return &(container_of(base, struct vmw_user_surface, base)->srf.res);
}

/**
 * vmw_user_surface_free - User visible surface resource destructor
 *
 * @res: A struct vmw_resource embedded in a struct vmw_surface.
 */
static void vmw_user_surface_free(struct vmw_resource *res)
{
	struct vmw_surface *srf = vmw_res_to_srf(res);
	struct vmw_user_surface *user_srf =
	    container_of(srf, struct vmw_user_surface, srf);
	struct vmw_private *dev_priv = srf->res.dev_priv;
	uint32_t size = user_srf->size;

	kfree(srf->offsets);
	kfree(srf->sizes);
	kfree(srf->snooper.image);
	ttm_base_object_kfree(user_srf, base);
	ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
}

/**
 * vmw_user_surface_base_release - User visible surface TTM base object
 * destructor
 *
 * @p_base: Pointer to a pointer to a TTM base object
 * embedded in a struct vmw_user_surface.
 *
 * Drops the base object's reference on its resource, and the
 * pointer pointed to by *p_base is set to NULL.
 */
static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_surface *user_srf =
	    container_of(base, struct vmw_user_surface, base);
	struct vmw_resource *res = &user_srf->srf.res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

/**
 * vmw_surface_destroy_ioctl - Ioctl function implementing
 * the user surface destroy functionality.
 *
 * @dev: Pointer to a struct drm_device.
 * @data: Pointer to data copied from / to user-space.
 * @file_priv: Pointer to a drm file private structure.
 */
int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

	return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
}

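/*
 * A user-space caller typically reaches the surface define ioctl below
 * through libdrm. A minimal sketch (assuming libdrm's drmCommandWriteRead()
 * and the request layout from vmwgfx_drm.h; error handling omitted):
 *
 *	union drm_vmw_surface_create_arg arg;
 *	struct drm_vmw_size size = { .width = 64, .height = 64, .depth = 1 };
 *
 *	memset(&arg, 0, sizeof(arg));
 *	arg.req.flags = 0;
 *	arg.req.format = SVGA3D_A8R8G8B8;
 *	arg.req.mip_levels[0] = 1;
 *	arg.req.size_addr = (unsigned long) &size;
 *	arg.req.scanout = 1;
 *	drmCommandWriteRead(fd, DRM_VMW_CREATE_SURFACE, &arg, sizeof(arg));
 *
 * On success, the new surface handle is returned in arg.rep.sid.
 */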
/**
 * vmw_surface_define_ioctl - Ioctl function implementing
 * the user surface define functionality.
 *
 * @dev: Pointer to a struct drm_device.
 * @data: Pointer to data copied from / to user-space.
 * @file_priv: Pointer to a drm file private structure.
 */
int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_surface *user_srf;
	struct vmw_surface *srf;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	union drm_vmw_surface_create_arg *arg =
	    (union drm_vmw_surface_create_arg *)data;
	struct drm_vmw_surface_create_req *req = &arg->req;
	struct drm_vmw_surface_arg *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct drm_vmw_size __user *user_sizes;
	int ret;
	int i, j;
	uint32_t cur_bo_offset;
	struct drm_vmw_size *cur_size;
	struct vmw_surface_offset *cur_offset;
	uint32_t stride_bpp;
	uint32_t bpp;
	uint32_t num_sizes;
	uint32_t size;
	struct vmw_master *vmaster = vmw_master(file_priv->master);

	if (unlikely(vmw_user_surface_size == 0))
		vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
			128;

	num_sizes = 0;
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
		num_sizes += req->mip_levels[i];

	if (num_sizes > DRM_VMW_MAX_SURFACE_FACES *
	    DRM_VMW_MAX_MIP_LEVELS)
		return -EINVAL;

	size = vmw_user_surface_size + 128 +
		ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) +
		ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset));

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   size, false, true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for surface"
				  " creation.\n");
		goto out_unlock;
	}

	user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
	if (unlikely(user_srf == NULL)) {
		ret = -ENOMEM;
		goto out_no_user_srf;
	}

	srf = &user_srf->srf;
	res = &srf->res;

	srf->flags = req->flags;
	srf->format = req->format;
	srf->scanout = req->scanout;

	memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
	srf->num_sizes = num_sizes;
	user_srf->size = size;

	srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
	if (unlikely(srf->sizes == NULL)) {
		ret = -ENOMEM;
		goto out_no_sizes;
	}
	srf->offsets = kmalloc(srf->num_sizes * sizeof(*srf->offsets),
			       GFP_KERNEL);
	if (unlikely(srf->offsets == NULL)) {
		ret = -ENOMEM;
		goto out_no_offsets;
	}

	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
	    req->size_addr;

	ret = copy_from_user(srf->sizes, user_sizes,
			     srf->num_sizes * sizeof(*srf->sizes));
	if (unlikely(ret != 0)) {
		ret = -EFAULT;
		goto out_no_copy;
	}

	srf->base_size = *srf->sizes;
	srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
	srf->multisample_count = 1;

	cur_bo_offset = 0;
	cur_offset = srf->offsets;
	cur_size = srf->sizes;

	bpp = vmw_sf_bpp[srf->format].bpp;
	stride_bpp = vmw_sf_bpp[srf->format].s_bpp;

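	/*
	 * Compute the per-face, per-mip-level offsets into the backing
	 * store, using the same stride rounding as
	 * vmw_surface_dma_encode(), and accumulate the total backup
	 * buffer size.
	 */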
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
		for (j = 0; j < srf->mip_levels[i]; ++j) {
			uint32_t stride =
				(cur_size->width * stride_bpp + 7) >> 3;

			cur_offset->face = i;
			cur_offset->mip = j;
			cur_offset->bo_offset = cur_bo_offset;
			cur_bo_offset += stride * cur_size->height *
				cur_size->depth * bpp / stride_bpp;
			++cur_offset;
			++cur_size;
		}
	}
	res->backup_size = cur_bo_offset;

	if (srf->scanout &&
	    srf->num_sizes == 1 &&
	    srf->sizes[0].width == 64 &&
	    srf->sizes[0].height == 64 &&
	    srf->format == SVGA3D_A8R8G8B8) {

		/* Allocate a zeroed cursor snooper image. */
		srf->snooper.image = kzalloc(64 * 64 * 4, GFP_KERNEL);
		if (srf->snooper.image == NULL) {
			DRM_ERROR("Failed to allocate cursor_image\n");
			ret = -ENOMEM;
			goto out_no_copy;
		}
	} else {
		srf->snooper.image = NULL;
	}
	srf->snooper.crtc = NULL;

	user_srf->base.shareable = false;
	user_srf->base.tfile = NULL;

	/*
	 * From this point, the generic resource management functions
	 * destroy the object on failure.
	 */

	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
	if (unlikely(ret != 0))
		goto out_unlock;

	tmp = vmw_resource_reference(&srf->res);
	ret = ttm_base_object_init(tfile, &user_srf->base,
				   req->shareable, VMW_RES_SURFACE,
				   &vmw_user_surface_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		vmw_resource_unreference(&res);
		goto out_unlock;
	}

	rep->sid = user_srf->base.hash.key;
	vmw_resource_unreference(&res);

	ttm_read_unlock(&vmaster->lock);
	return 0;
out_no_copy:
	kfree(srf->offsets);
out_no_offsets:
	kfree(srf->sizes);
out_no_sizes:
	ttm_base_object_kfree(user_srf, base);
out_no_user_srf:
	ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
out_unlock:
	ttm_read_unlock(&vmaster->lock);
	return ret;
}

/**
 * vmw_surface_reference_ioctl - Ioctl function implementing
 * the user surface reference functionality.
 *
 * @dev: Pointer to a struct drm_device.
 * @data: Pointer to data copied from / to user-space.
 * @file_priv: Pointer to a drm file private structure.
 */
int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	union drm_vmw_surface_reference_arg *arg =
	    (union drm_vmw_surface_reference_arg *)data;
	struct drm_vmw_surface_arg *req = &arg->req;
	struct drm_vmw_surface_create_req *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;
	struct drm_vmw_size __user *user_sizes;
	struct ttm_base_object *base;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, req->sid);
	if (unlikely(base == NULL)) {
		DRM_ERROR("Could not find surface to reference.\n");
		return -EINVAL;
	}

	if (unlikely(base->object_type != VMW_RES_SURFACE))
		goto out_bad_resource;

	user_srf = container_of(base, struct vmw_user_surface, base);
	srf = &user_srf->srf;

	ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not add a reference to a surface.\n");
		goto out_no_reference;
	}

	rep->flags = srf->flags;
	rep->format = srf->format;
	memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
	    rep->size_addr;

	if (user_sizes)
		ret = copy_to_user(user_sizes, srf->sizes,
				   srf->num_sizes * sizeof(*srf->sizes));
	if (unlikely(ret != 0)) {
		DRM_ERROR("copy_to_user failed %p %u\n",
			  user_sizes, srf->num_sizes);
		ret = -EFAULT;
	}
out_bad_resource:
out_no_reference:
	ttm_base_object_unref(&base);

	return ret;
}