/**************************************************************************
 *
 * Copyright © 2012 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"

/*
 * If we set up the screen target otable, screen objects stop working.
 */

#define VMW_OTABLE_SETUP_SUB ((VMWGFX_ENABLE_SCREEN_TARGET_OTABLE) ? 0 : 1)

/*
 * Currently the MOB interface does not support 64-bit page frame numbers.
 * This might change in the future to be similar to the GMR2 interface
 * when virtual machines support memory beyond 16TB.
 */

#define VMW_PPN_SIZE 4
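/*
 * Worked example (illustrative, assuming 4 KiB pages): with 4-byte PPN
 * entries, one page-table page holds PAGE_SIZE / VMW_PPN_SIZE = 1024
 * entries, so a single level-1 page maps 1024 * 4 KiB = 4 MiB of data
 * and a full two-level table maps up to 1024 * 1024 * 4 KiB = 4 GiB.
 */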
/*
 * struct vmw_mob - Structure containing page table and metadata for a
 * Guest Memory OBject.
 *
 * @pt_bo: Buffer object holding the page table pages.
 * @num_pages: Number of pages that make up the page table.
 * @pt_level: The indirection level of the page table. 0-2.
 * @pt_root_page: DMA address of the level 0 page of the page table.
 * @id: Device id of the mob.
 */
struct vmw_mob {
	struct ttm_buffer_object *pt_bo;
	unsigned long num_pages;
	unsigned pt_level;
	dma_addr_t pt_root_page;
	uint32_t id;
};

/*
 * struct vmw_otable - Guest Memory OBject table metadata
 *
 * @size: Size of the table (page-aligned).
 * @page_table: Pointer to a struct vmw_mob holding the page table.
 */
struct vmw_otable {
	unsigned long size;
	struct vmw_mob *page_table;
};

static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
			       struct vmw_mob *mob);
static void vmw_mob_pt_setup(struct vmw_mob *mob,
			     struct vmw_piter data_iter,
			     unsigned long num_data_pages);

/*
 * vmw_setup_otable_base - Issue an object table base setup command to
 * the device
 *
 * @dev_priv: Pointer to a device private structure
 * @type: Type of object table base
 * @offset: Start of table offset into dev_priv::otable_bo
 * @otable: Pointer to otable metadata
 *
 * This function returns -ENOMEM if it fails to reserve fifo space,
 * and may block waiting for fifo space.
 */
static int vmw_setup_otable_base(struct vmw_private *dev_priv,
				 SVGAOTableType type,
				 unsigned long offset,
				 struct vmw_otable *otable)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetOTableBase body;
	} *cmd;
	struct vmw_mob *mob;
	const struct vmw_sg_table *vsgt;
	struct vmw_piter iter;
	int ret;

	BUG_ON(otable->page_table != NULL);

	vsgt = vmw_bo_sg_table(dev_priv->otable_bo);
	vmw_piter_start(&iter, vsgt, offset >> PAGE_SHIFT);
	WARN_ON(!vmw_piter_next(&iter));

	mob = vmw_mob_create(otable->size >> PAGE_SHIFT);
	if (unlikely(mob == NULL)) {
		DRM_ERROR("Failed creating OTable page table.\n");
		return -ENOMEM;
	}

	if (otable->size <= PAGE_SIZE) {
		mob->pt_level = SVGA3D_MOBFMT_PTDEPTH_0;
		mob->pt_root_page = vmw_piter_dma_addr(&iter);
	} else if (vsgt->num_regions == 1) {
		mob->pt_level = SVGA3D_MOBFMT_RANGE;
		mob->pt_root_page = vmw_piter_dma_addr(&iter);
	} else {
		ret = vmw_mob_pt_populate(dev_priv, mob);
		if (unlikely(ret != 0))
			goto out_no_populate;

		vmw_mob_pt_setup(mob, iter, otable->size >> PAGE_SHIFT);
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for OTable setup.\n");
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	memset(cmd, 0, sizeof(*cmd));
	cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.type = type;
	cmd->body.baseAddress = mob->pt_root_page >> PAGE_SHIFT;
	cmd->body.sizeInBytes = otable->size;
	cmd->body.validSizeInBytes = 0;
	cmd->body.ptDepth = mob->pt_level;

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	otable->page_table = mob;

	return 0;

out_no_fifo:
out_no_populate:
	vmw_mob_destroy(mob);
	return ret;
}

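/*
 * Summary of the page-table format selection above (mirrored by
 * vmw_mob_bind() below): a table that fits in a single page uses
 * SVGA3D_MOBFMT_PTDEPTH_0, with the base address pointing directly at
 * the data page; a physically contiguous table (a single sg region)
 * uses SVGA3D_MOBFMT_RANGE, with the base address of the first page;
 * only scattered multi-page tables need a real page table built by
 * vmw_mob_pt_setup().
 */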
/*
 * vmw_takedown_otable_base - Issue an object table base takedown command
 * to the device
 *
 * @dev_priv: Pointer to a device private structure
 * @type: Type of object table base
 * @otable: Pointer to otable metadata
 */
static void vmw_takedown_otable_base(struct vmw_private *dev_priv,
				     SVGAOTableType type,
				     struct vmw_otable *otable)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetOTableBase body;
	} *cmd;
	struct ttm_buffer_object *bo;

	if (otable->page_table == NULL)
		return;

	bo = otable->page_table->pt_bo;
	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for OTable "
			  "takedown.\n");
	} else {
		memset(cmd, 0, sizeof(*cmd));
		cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE;
		cmd->header.size = sizeof(cmd->body);
		cmd->body.type = type;
		cmd->body.baseAddress = 0;
		cmd->body.sizeInBytes = 0;
		cmd->body.validSizeInBytes = 0;
		cmd->body.ptDepth = SVGA3D_MOBFMT_INVALID;
		vmw_fifo_commit(dev_priv, sizeof(*cmd));
	}

	if (bo) {
		int ret;

		ret = ttm_bo_reserve(bo, false, true, false, false);
		BUG_ON(ret != 0);

		vmw_fence_single_bo(bo, NULL);
		ttm_bo_unreserve(bo);
	}

	vmw_mob_destroy(otable->page_table);
	otable->page_table = NULL;
}

/*
 * vmw_otables_setup - Set up guest backed memory object tables
 *
 * @dev_priv: Pointer to a device private structure
 *
 * Takes care of the device guest backed surface
 * initialization, by setting up the guest backed memory object tables.
 * Returns 0 on success and various error codes on failure. A successful return
 * means the object tables can be taken down using the vmw_otables_takedown
 * function.
 */
int vmw_otables_setup(struct vmw_private *dev_priv)
{
	unsigned long offset;
	unsigned long bo_size;
	struct vmw_otable *otables;
	SVGAOTableType i;
	int ret;

	otables = kzalloc(SVGA_OTABLE_DX9_MAX * sizeof(*otables),
			  GFP_KERNEL);
	if (unlikely(otables == NULL)) {
		DRM_ERROR("Failed to allocate space for otable "
			  "metadata.\n");
		return -ENOMEM;
	}

	otables[SVGA_OTABLE_MOB].size =
		VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE;
	otables[SVGA_OTABLE_SURFACE].size =
		VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE;
	otables[SVGA_OTABLE_CONTEXT].size =
		VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE;
	otables[SVGA_OTABLE_SHADER].size =
		VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE;
	otables[SVGA_OTABLE_SCREEN_TARGET].size =
		VMWGFX_NUM_GB_SCREEN_TARGET *
		SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE;

	bo_size = 0;
	for (i = 0; i < SVGA_OTABLE_DX9_MAX; ++i) {
		otables[i].size =
			(otables[i].size + PAGE_SIZE - 1) & PAGE_MASK;
		bo_size += otables[i].size;
	}

	ret = ttm_bo_create(&dev_priv->bdev, bo_size,
			    ttm_bo_type_device,
			    &vmw_sys_ne_placement,
			    0, false, NULL,
			    &dev_priv->otable_bo);

	if (unlikely(ret != 0))
		goto out_no_bo;

	ret = ttm_bo_reserve(dev_priv->otable_bo, false, true, false, false);
	BUG_ON(ret != 0);
	ret = vmw_bo_driver.ttm_tt_populate(dev_priv->otable_bo->ttm);
	if (unlikely(ret != 0))
		goto out_unreserve;
	ret = vmw_bo_map_dma(dev_priv->otable_bo);
	if (unlikely(ret != 0))
		goto out_unreserve;

	ttm_bo_unreserve(dev_priv->otable_bo);

	offset = 0;
	for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i) {
		ret = vmw_setup_otable_base(dev_priv, i, offset,
					    &otables[i]);
		if (unlikely(ret != 0))
			goto out_no_setup;
		offset += otables[i].size;
	}

	dev_priv->otables = otables;
	return 0;

out_unreserve:
	ttm_bo_unreserve(dev_priv->otable_bo);
out_no_setup:
	for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i)
		vmw_takedown_otable_base(dev_priv, i, &otables[i]);

	ttm_bo_unref(&dev_priv->otable_bo);
out_no_bo:
	kfree(otables);
	return ret;
}

/*
 * vmw_otables_takedown - Take down guest backed memory object tables
 *
 * @dev_priv: Pointer to a device private structure
 *
 * Take down the Guest Memory Object tables.
 */
void vmw_otables_takedown(struct vmw_private *dev_priv)
{
	SVGAOTableType i;
	struct ttm_buffer_object *bo = dev_priv->otable_bo;
	int ret;

	for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i)
		vmw_takedown_otable_base(dev_priv, i,
					 &dev_priv->otables[i]);

	ret = ttm_bo_reserve(bo, false, true, false, false);
	BUG_ON(ret != 0);

	vmw_fence_single_bo(bo, NULL);
	ttm_bo_unreserve(bo);

	ttm_bo_unref(&dev_priv->otable_bo);
	kfree(dev_priv->otables);
	dev_priv->otables = NULL;
}

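/*
 * Usage sketch (illustrative only; the real callers live in the driver
 * init and teardown paths): the two functions above are expected to be
 * paired around the period in which guest-backed objects are in use:
 *
 *	ret = vmw_otables_setup(dev_priv);
 *	if (ret)
 *		return ret;
 *	...use guest-backed surfaces, contexts, shaders and mobs...
 *	vmw_otables_takedown(dev_priv);
 */
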
/*
 * vmw_mob_calculate_pt_pages - Calculate the number of page table pages
 * needed for a guest backed memory object.
 *
 * @data_pages: Number of data pages in the memory object buffer.
 */
static unsigned long vmw_mob_calculate_pt_pages(unsigned long data_pages)
{
	unsigned long data_size = data_pages * PAGE_SIZE;
	unsigned long tot_size = 0;

	while (likely(data_size > PAGE_SIZE)) {
		data_size = DIV_ROUND_UP(data_size, PAGE_SIZE);
		data_size *= VMW_PPN_SIZE;
		tot_size += (data_size + PAGE_SIZE - 1) & PAGE_MASK;
	}

	return tot_size >> PAGE_SHIFT;
}

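/*
 * Worked example (assuming 4 KiB pages): for a 1 GiB mob,
 * data_pages = 262144. First pass: 262144 entries * 4 bytes = 1 MiB of
 * level-1 tables (256 pages). Second pass: 256 entries * 4 bytes =
 * 1 KiB, rounded up to one level-2 page. The loop then stops, so
 * vmw_mob_calculate_pt_pages() returns 257.
 */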
/*
 * vmw_mob_create - Create a mob, but don't populate it.
 *
 * @data_pages: Number of data pages of the underlying buffer object.
 */
struct vmw_mob *vmw_mob_create(unsigned long data_pages)
{
	struct vmw_mob *mob = kzalloc(sizeof(*mob), GFP_KERNEL);

	if (unlikely(mob == NULL))
		return NULL;

	mob->num_pages = vmw_mob_calculate_pt_pages(data_pages);

	return mob;
}

/*
 * vmw_mob_pt_populate - Populate the mob pagetable
 *
 * @dev_priv: Pointer to a device private structure
 * @mob: Pointer to the mob whose pagetable we want to populate.
 *
 * This function allocates memory to be used for the pagetable, and
 * adjusts TTM memory accounting accordingly. Returns -ENOMEM if
 * memory resources aren't sufficient, and may cause TTM buffer objects
 * to be swapped out through the TTM memory accounting function.
 */
static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
			       struct vmw_mob *mob)
{
	int ret;

	BUG_ON(mob->pt_bo != NULL);

	ret = ttm_bo_create(&dev_priv->bdev, mob->num_pages * PAGE_SIZE,
			    ttm_bo_type_device,
			    &vmw_sys_ne_placement,
			    0, false, NULL, &mob->pt_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(mob->pt_bo, false, true, false, false);

	BUG_ON(ret != 0);
	ret = vmw_bo_driver.ttm_tt_populate(mob->pt_bo->ttm);
	if (unlikely(ret != 0))
		goto out_unreserve;
	ret = vmw_bo_map_dma(mob->pt_bo);
	if (unlikely(ret != 0))
		goto out_unreserve;

	ttm_bo_unreserve(mob->pt_bo);

	return 0;

out_unreserve:
	ttm_bo_unreserve(mob->pt_bo);
	ttm_bo_unref(&mob->pt_bo);

	return ret;
}

/*
 * vmw_mob_build_pt - Build a pagetable
 *
 * @data_iter: Page iterator pointing at the DMA addresses of the
 * underlying buffer object's data pages.
 * @num_data_pages: Number of buffer object data pages.
 * @pt_iter: Page iterator pointing at the page table pages to write
 * the entries into.
 *
 * Returns the number of page table pages actually used.
 * Uses atomic kmaps of highmem pages to avoid TLB thrashing.
 */
static unsigned long vmw_mob_build_pt(struct vmw_piter *data_iter,
				      unsigned long num_data_pages,
				      struct vmw_piter *pt_iter)
{
	unsigned long pt_size = num_data_pages * VMW_PPN_SIZE;
	unsigned long num_pt_pages = DIV_ROUND_UP(pt_size, PAGE_SIZE);
	unsigned long pt_page;
	uint32_t *addr, *save_addr;
	unsigned long i;
	struct page *page;

	for (pt_page = 0; pt_page < num_pt_pages; ++pt_page) {
		page = vmw_piter_page(pt_iter);

		save_addr = addr = kmap_atomic(page);

		for (i = 0; i < PAGE_SIZE / VMW_PPN_SIZE; ++i) {
			u32 tmp = vmw_piter_dma_addr(data_iter) >> PAGE_SHIFT;

			*addr++ = tmp;
			if (unlikely(--num_data_pages == 0))
				break;
			WARN_ON(!vmw_piter_next(data_iter));
		}
		kunmap_atomic(save_addr);
		vmw_piter_next(pt_iter);
	}

	return num_pt_pages;
}

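/*
 * Worked example (assuming 4 KiB pages): for num_data_pages = 5000,
 * pt_size = 5000 * 4 = 20000 bytes, so num_pt_pages =
 * DIV_ROUND_UP(20000, 4096) = 5. The first four page table pages are
 * filled with 1024 PPN entries each and the fifth with the remaining
 * 904 entries.
 */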
/*
 * vmw_mob_pt_setup - Set up a multilevel mob pagetable
 *
 * @mob: Pointer to a mob whose page table needs setting up.
 * @data_iter: Page iterator pointing at the DMA addresses of the
 * buffer object's data pages.
 * @num_data_pages: Number of buffer object data pages.
 *
 * Iteratively builds page-table levels until a single root page
 * remains, feeding each level's pages back in as the data pages of
 * the level above.
 */
static void vmw_mob_pt_setup(struct vmw_mob *mob,
			     struct vmw_piter data_iter,
			     unsigned long num_data_pages)
{
	unsigned long num_pt_pages = 0;
	struct ttm_buffer_object *bo = mob->pt_bo;
	struct vmw_piter save_pt_iter;
	struct vmw_piter pt_iter;
	const struct vmw_sg_table *vsgt;
	int ret;

	ret = ttm_bo_reserve(bo, false, true, false, 0);
	BUG_ON(ret != 0);

	vsgt = vmw_bo_sg_table(bo);
	vmw_piter_start(&pt_iter, vsgt, 0);
	BUG_ON(!vmw_piter_next(&pt_iter));
	mob->pt_level = 0;
	while (likely(num_data_pages > 1)) {
		++mob->pt_level;
		BUG_ON(mob->pt_level > 2);
		save_pt_iter = pt_iter;
		num_pt_pages = vmw_mob_build_pt(&data_iter, num_data_pages,
						&pt_iter);
		data_iter = save_pt_iter;
		num_data_pages = num_pt_pages;
	}

	mob->pt_root_page = vmw_piter_dma_addr(&save_pt_iter);
	ttm_bo_unreserve(bo);
}

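/*
 * Worked example (assuming 4 KiB pages): for num_data_pages = 1000000
 * (roughly a 3.8 GiB mob), the first iteration writes
 * DIV_ROUND_UP(1000000, 1024) = 977 level-1 pages, and the second
 * writes DIV_ROUND_UP(977, 1024) = 1 level-2 page. The loop then
 * terminates with pt_level == 2 and that single page as the root.
 */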
/*
 * vmw_mob_destroy - Destroy a mob, unpopulating first if necessary.
 *
 * @mob: Pointer to a mob to destroy.
 */
void vmw_mob_destroy(struct vmw_mob *mob)
{
	if (mob->pt_bo)
		ttm_bo_unref(&mob->pt_bo);
	kfree(mob);
}

/*
 * vmw_mob_unbind - Hide a mob from the device.
 *
 * @dev_priv: Pointer to a device private.
 * @mob: Pointer to the mob to unbind.
 */
void vmw_mob_unbind(struct vmw_private *dev_priv,
		    struct vmw_mob *mob)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyGBMob body;
	} *cmd;
	int ret;
	struct ttm_buffer_object *bo = mob->pt_bo;

	if (bo) {
		ret = ttm_bo_reserve(bo, false, true, false, 0);
		/*
		 * No one else should be using this buffer.
		 */
		BUG_ON(ret != 0);
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for Memory "
			  "Object unbinding.\n");
	} else {
		cmd->header.id = SVGA_3D_CMD_DESTROY_GB_MOB;
		cmd->header.size = sizeof(cmd->body);
		cmd->body.mobid = mob->id;
		vmw_fifo_commit(dev_priv, sizeof(*cmd));
	}

	if (bo) {
		vmw_fence_single_bo(bo, NULL);
		ttm_bo_unreserve(bo);
	}
	vmw_3d_resource_dec(dev_priv, false);
}

/*
 * vmw_mob_bind - Make a mob visible to the device after first
 * populating it if necessary.
 *
 * @dev_priv: Pointer to a device private.
 * @mob: Pointer to the mob we're making visible.
 * @vsgt: Pointer to a struct vmw_sg_table describing the data pages of
 * the underlying buffer object.
 * @num_data_pages: Number of data pages of the underlying buffer
 * object.
 * @mob_id: Device id of the mob to bind
 *
 * This function is intended to be interfaced with the ttm_tt backend
 * code.
 */
int vmw_mob_bind(struct vmw_private *dev_priv,
		 struct vmw_mob *mob,
		 const struct vmw_sg_table *vsgt,
		 unsigned long num_data_pages,
		 int32_t mob_id)
{
	int ret;
	bool pt_set_up = false;
	struct vmw_piter data_iter;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBMob body;
	} *cmd;

	mob->id = mob_id;
	vmw_piter_start(&data_iter, vsgt, 0);
	if (unlikely(!vmw_piter_next(&data_iter)))
		return 0;

	if (likely(num_data_pages == 1)) {
		mob->pt_level = SVGA3D_MOBFMT_PTDEPTH_0;
		mob->pt_root_page = vmw_piter_dma_addr(&data_iter);
	} else if (vsgt->num_regions == 1) {
		mob->pt_level = SVGA3D_MOBFMT_RANGE;
		mob->pt_root_page = vmw_piter_dma_addr(&data_iter);
	} else if (unlikely(mob->pt_bo == NULL)) {
		ret = vmw_mob_pt_populate(dev_priv, mob);
		if (unlikely(ret != 0))
			return ret;

		vmw_mob_pt_setup(mob, data_iter, num_data_pages);
		pt_set_up = true;
	}

	(void) vmw_3d_resource_inc(dev_priv, false);

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for Memory "
			  "Object binding.\n");
		goto out_no_cmd_space;
	}

	cmd->header.id = SVGA_3D_CMD_DEFINE_GB_MOB;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.mobid = mob_id;
	cmd->body.ptDepth = mob->pt_level;
	cmd->body.base = mob->pt_root_page >> PAGE_SHIFT;
	cmd->body.sizeInBytes = num_data_pages * PAGE_SIZE;

	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;

out_no_cmd_space:
	vmw_3d_resource_dec(dev_priv, false);
	if (pt_set_up)
		ttm_bo_unref(&mob->pt_bo);

	return -ENOMEM;
}
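
/*
 * Illustrative mob lifecycle (a sketch of how the functions in this
 * file fit together; the real callers live in the ttm_tt backend and
 * resource code):
 *
 *	mob = vmw_mob_create(num_data_pages);
 *	ret = vmw_mob_bind(dev_priv, mob, vsgt, num_data_pages, mob_id);
 *	...the device may now access the buffer through the mob...
 *	vmw_mob_unbind(dev_priv, mob);
 *	vmw_mob_destroy(mob);
 */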