/**************************************************************************
 *
 * Copyright © 2012 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"

/*
 * Currently the MOB interface does not support 64-bit page frame numbers.
 * This might change in the future to be similar to the GMR2 interface
 * when virtual machines support memory beyond 16TB.
 */

#define VMW_PPN_SIZE 4

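/*
 * Note: with 4 KiB pages, one page table page thus holds
 * PAGE_SIZE / VMW_PPN_SIZE = 1024 entries and maps 4 MiB of data.
 */
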
/*
 * struct vmw_mob - Structure containing page table and metadata for a
 * Guest Memory OBject.
 *
 * @pt_bo: Buffer object holding the page table pages.
 * @num_pages: Number of pages that make up the page table.
 * @pt_level: The indirection level of the page table. 0-2.
 * @pt_root_page: DMA address of the level 0 page of the page table.
 * @id: Device id of the mob.
 */
struct vmw_mob {
	struct ttm_buffer_object *pt_bo;
	unsigned long num_pages;
	unsigned pt_level;
	dma_addr_t pt_root_page;
	uint32_t id;
};

/*
 * struct vmw_otable - Guest Memory OBject table metadata
 *
 * @size: Size of the table (page-aligned).
 * @page_table: Pointer to a struct vmw_mob holding the page table.
 */
struct vmw_otable {
	unsigned long size;
	struct vmw_mob *page_table;
};

static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
			       struct vmw_mob *mob);
static void vmw_mob_pt_setup(struct vmw_mob *mob,
			     struct vmw_piter data_iter,
			     unsigned long num_data_pages);

/*
 * vmw_setup_otable_base - Issue an object table base setup command to
 * the device
 *
 * @dev_priv: Pointer to a device private structure
 * @type: Type of object table base
 * @offset: Offset into dev_priv::otable_bo at which the table starts
 * @otable: Pointer to otable metadata
 *
 * This function returns -ENOMEM if it fails to reserve fifo space,
 * and may block waiting for fifo space. It may also return errors
 * from the page table population.
 */
static int vmw_setup_otable_base(struct vmw_private *dev_priv,
				 SVGAOTableType type,
				 unsigned long offset,
				 struct vmw_otable *otable)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetOTableBase body;
	} *cmd;
	struct vmw_mob *mob;
	const struct vmw_sg_table *vsgt;
	struct vmw_piter iter;
	int ret;

	BUG_ON(otable->page_table != NULL);

	vsgt = vmw_bo_sg_table(dev_priv->otable_bo);
	vmw_piter_start(&iter, vsgt, offset >> PAGE_SHIFT);
	WARN_ON(!vmw_piter_next(&iter));

	mob = vmw_mob_create(otable->size >> PAGE_SHIFT);
	if (unlikely(mob == NULL)) {
		DRM_ERROR("Failed creating OTable page table.\n");
		return -ENOMEM;
	}

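	/*
	 * Pick the cheapest page table format the device supports: a
	 * table that fits in a single page needs no indirection
	 * (depth 0), a physically contiguous buffer needs no page
	 * table at all (SVGA3D_MOBFMT_RANGE), and anything else gets
	 * a multilevel page table.
	 */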
	if (otable->size <= PAGE_SIZE) {
		mob->pt_level = SVGA3D_MOBFMT_PTDEPTH_0;
		mob->pt_root_page = vmw_piter_dma_addr(&iter);
	} else if (vsgt->num_regions == 1) {
		mob->pt_level = SVGA3D_MOBFMT_RANGE;
		mob->pt_root_page = vmw_piter_dma_addr(&iter);
	} else {
		ret = vmw_mob_pt_populate(dev_priv, mob);
		if (unlikely(ret != 0))
			goto out_no_populate;

		vmw_mob_pt_setup(mob, iter, otable->size >> PAGE_SHIFT);
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for OTable setup.\n");
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	memset(cmd, 0, sizeof(*cmd));
	cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.type = type;
	cmd->body.baseAddress = mob->pt_root_page >> PAGE_SHIFT;
	cmd->body.sizeInBytes = otable->size;
	cmd->body.validSizeInBytes = 0;
	cmd->body.ptDepth = mob->pt_level;

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	otable->page_table = mob;

	return 0;

out_no_fifo:
out_no_populate:
	vmw_mob_destroy(mob);
	return ret;
}

/*
 * vmw_takedown_otable_base - Issue an object table base takedown command
 * to the device
 *
 * @dev_priv: Pointer to a device private structure
 * @type: Type of object table base
 * @otable: Pointer to otable metadata
 */
static void vmw_takedown_otable_base(struct vmw_private *dev_priv,
				     SVGAOTableType type,
				     struct vmw_otable *otable)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetOTableBase body;
	} *cmd;
	struct ttm_buffer_object *bo;

	if (otable->page_table == NULL)
		return;

	bo = otable->page_table->pt_bo;
	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for OTable takedown.\n");
	} else {
		memset(cmd, 0, sizeof(*cmd));
		cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE;
		cmd->header.size = sizeof(cmd->body);
		cmd->body.type = type;
		cmd->body.baseAddress = 0;
		cmd->body.sizeInBytes = 0;
		cmd->body.validSizeInBytes = 0;
		cmd->body.ptDepth = SVGA3D_MOBFMT_INVALID;
		vmw_fifo_commit(dev_priv, sizeof(*cmd));
	}

	if (bo) {
		int ret;

		ret = ttm_bo_reserve(bo, false, true, false, false);
		BUG_ON(ret != 0);

		vmw_fence_single_bo(bo, NULL);
		ttm_bo_unreserve(bo);
	}

	vmw_mob_destroy(otable->page_table);
	otable->page_table = NULL;
}

/*
 * vmw_otables_setup - Set up guest backed memory object tables
 *
 * @dev_priv: Pointer to a device private structure
 *
 * Takes care of the device guest backed object initialization, by
 * setting up the guest backed memory object tables.
 * Returns 0 on success and various error codes on failure. A successful return
 * means the object tables can be taken down using the vmw_otables_takedown
 * function.
 */
int vmw_otables_setup(struct vmw_private *dev_priv)
{
	unsigned long offset;
	unsigned long bo_size;
	struct vmw_otable *otables;
	SVGAOTableType i;
	int ret;

	otables = kzalloc(SVGA_OTABLE_COUNT * sizeof(*otables),
			  GFP_KERNEL);
	if (unlikely(otables == NULL)) {
		DRM_ERROR("Failed to allocate space for otable "
			  "metadata.\n");
		return -ENOMEM;
	}

	otables[SVGA_OTABLE_MOB].size =
		VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE;
	otables[SVGA_OTABLE_SURFACE].size =
		VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE;
	otables[SVGA_OTABLE_CONTEXT].size =
		VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE;
	otables[SVGA_OTABLE_SHADER].size =
		VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE;

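	/*
	 * All otables live in a single backing buffer object; each
	 * table is page-aligned within it so vmw_setup_otable_base()
	 * can point the device at it using a page offset.
	 */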
	bo_size = 0;
	for (i = 0; i < SVGA_OTABLE_COUNT; ++i) {
		otables[i].size =
			(otables[i].size + PAGE_SIZE - 1) & PAGE_MASK;
		bo_size += otables[i].size;
	}

	ret = ttm_bo_create(&dev_priv->bdev, bo_size,
			    ttm_bo_type_device,
			    &vmw_sys_ne_placement,
			    0, false, NULL,
			    &dev_priv->otable_bo);

	if (unlikely(ret != 0))
		goto out_no_bo;

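	/*
	 * Populate the TTM pages backing the buffer object and set up
	 * its DMA mappings before pointing the device at it.
	 */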
	ret = ttm_bo_reserve(dev_priv->otable_bo, false, true, false, false);
	BUG_ON(ret != 0);
	ret = vmw_bo_driver.ttm_tt_populate(dev_priv->otable_bo->ttm);
	if (unlikely(ret != 0))
		goto out_unreserve;
	ret = vmw_bo_map_dma(dev_priv->otable_bo);
	if (unlikely(ret != 0))
		goto out_unreserve;

	ttm_bo_unreserve(dev_priv->otable_bo);

	offset = 0;
	for (i = 0; i < SVGA_OTABLE_COUNT; ++i) {
		ret = vmw_setup_otable_base(dev_priv, i, offset,
					    &otables[i]);
		if (unlikely(ret != 0))
			goto out_no_setup;
		offset += otables[i].size;
	}

	dev_priv->otables = otables;
	return 0;

out_unreserve:
	ttm_bo_unreserve(dev_priv->otable_bo);
out_no_setup:
	for (i = 0; i < SVGA_OTABLE_COUNT; ++i)
		vmw_takedown_otable_base(dev_priv, i, &otables[i]);

	ttm_bo_unref(&dev_priv->otable_bo);
out_no_bo:
	kfree(otables);
	return ret;
}


/*
 * vmw_otables_takedown - Take down guest backed memory object tables
 *
 * @dev_priv: Pointer to a device private structure
 *
 * Take down the Guest Memory Object tables.
 */
void vmw_otables_takedown(struct vmw_private *dev_priv)
{
	SVGAOTableType i;
	struct ttm_buffer_object *bo = dev_priv->otable_bo;
	int ret;

	for (i = 0; i < SVGA_OTABLE_COUNT; ++i)
		vmw_takedown_otable_base(dev_priv, i,
					 &dev_priv->otables[i]);

	ret = ttm_bo_reserve(bo, false, true, false, false);
	BUG_ON(ret != 0);

	vmw_fence_single_bo(bo, NULL);
	ttm_bo_unreserve(bo);

	ttm_bo_unref(&dev_priv->otable_bo);
	kfree(dev_priv->otables);
	dev_priv->otables = NULL;
}


/*
 * vmw_mob_calculate_pt_pages - Calculate the number of page table pages
 * needed for a guest backed memory object.
 *
 * @data_pages: Number of data pages in the memory object buffer.
 */
static unsigned long vmw_mob_calculate_pt_pages(unsigned long data_pages)
{
	unsigned long data_size = data_pages * PAGE_SIZE;
	unsigned long tot_size = 0;

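	/*
	 * Each iteration accounts for one page table level: level n
	 * needs one VMW_PPN_SIZE entry per page of level n - 1. For
	 * example, with 4 KiB pages a 1 GiB mob (262144 data pages)
	 * needs 256 level-1 pages plus one level-2 page, 257 in total.
	 */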
	while (likely(data_size > PAGE_SIZE)) {
		data_size = DIV_ROUND_UP(data_size, PAGE_SIZE);
		data_size *= VMW_PPN_SIZE;
		tot_size += (data_size + PAGE_SIZE - 1) & PAGE_MASK;
	}

	return tot_size >> PAGE_SHIFT;
}

/*
 * vmw_mob_create - Create a mob, but don't populate it.
 *
 * @data_pages: Number of data pages of the underlying buffer object.
 */
struct vmw_mob *vmw_mob_create(unsigned long data_pages)
{
	struct vmw_mob *mob = kzalloc(sizeof(*mob), GFP_KERNEL);

	if (unlikely(mob == NULL))
		return NULL;

	mob->num_pages = vmw_mob_calculate_pt_pages(data_pages);

	return mob;
}

/*
 * vmw_mob_pt_populate - Populate the mob pagetable
 *
 * @dev_priv: Pointer to a device private structure
 * @mob: Pointer to the mob whose pagetable we want to populate.
 *
 * This function allocates memory to be used for the pagetable, and
 * adjusts TTM memory accounting accordingly. It returns -ENOMEM if
 * memory resources aren't sufficient; through the TTM memory
 * accounting it may cause other TTM buffer objects to be swapped out.
 */
static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
			       struct vmw_mob *mob)
{
	int ret;

	BUG_ON(mob->pt_bo != NULL);

	ret = ttm_bo_create(&dev_priv->bdev, mob->num_pages * PAGE_SIZE,
			    ttm_bo_type_device,
			    &vmw_sys_ne_placement,
			    0, false, NULL, &mob->pt_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(mob->pt_bo, false, true, false, false);

	BUG_ON(ret != 0);
	ret = vmw_bo_driver.ttm_tt_populate(mob->pt_bo->ttm);
	if (unlikely(ret != 0))
		goto out_unreserve;
	ret = vmw_bo_map_dma(mob->pt_bo);
	if (unlikely(ret != 0))
		goto out_unreserve;

	ttm_bo_unreserve(mob->pt_bo);

	return 0;

out_unreserve:
	ttm_bo_unreserve(mob->pt_bo);
	ttm_bo_unref(&mob->pt_bo);

	return ret;
}


/*
 * vmw_mob_build_pt - Build a pagetable
 *
 * @data_iter: Pointer iterator over the DMA addresses of the underlying
 * buffer object's data pages.
 * @num_data_pages: Number of buffer object data pages.
 * @pt_iter: Pointer iterator over the page table pages to fill in.
 *
 * Returns the number of page table pages actually used.
 * Uses atomic kmaps of highmem pages to avoid TLB thrashing.
 */
static unsigned long vmw_mob_build_pt(struct vmw_piter *data_iter,
				      unsigned long num_data_pages,
				      struct vmw_piter *pt_iter)
{
	unsigned long pt_size = num_data_pages * VMW_PPN_SIZE;
	unsigned long num_pt_pages = DIV_ROUND_UP(pt_size, PAGE_SIZE);
	unsigned long pt_page;
	uint32_t *addr, *save_addr;
	unsigned long i;
	struct page *page;

	for (pt_page = 0; pt_page < num_pt_pages; ++pt_page) {
		page = vmw_piter_page(pt_iter);

		save_addr = addr = kmap_atomic(page);

		for (i = 0; i < PAGE_SIZE / VMW_PPN_SIZE; ++i) {
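			/*
			 * The device takes 32-bit page frame numbers
			 * (see the VMW_PPN_SIZE note at the top of the
			 * file), so each DMA address is shifted down
			 * to a PFN and stored as a 32-bit entry.
			 */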
			u32 tmp = vmw_piter_dma_addr(data_iter) >> PAGE_SHIFT;
			*addr++ = tmp;
			if (unlikely(--num_data_pages == 0))
				break;
			WARN_ON(!vmw_piter_next(data_iter));
		}
		kunmap_atomic(save_addr);
		vmw_piter_next(pt_iter);
	}

	return num_pt_pages;
}

/*
 * vmw_mob_pt_setup - Set up a multilevel mob pagetable
 *
 * @mob: Pointer to a mob whose page table needs setting up.
 * @data_iter: Pointer iterator over the DMA addresses of the buffer
 * object's data pages.
 * @num_data_pages: Number of buffer object data pages.
 *
 * Iteratively builds each level of a multilevel mob page table, until
 * the top level fits in a single page.
 */
static void vmw_mob_pt_setup(struct vmw_mob *mob,
			     struct vmw_piter data_iter,
			     unsigned long num_data_pages)
{
	unsigned long num_pt_pages = 0;
	struct ttm_buffer_object *bo = mob->pt_bo;
	struct vmw_piter save_pt_iter;
	struct vmw_piter pt_iter;
	const struct vmw_sg_table *vsgt;
	int ret;

	ret = ttm_bo_reserve(bo, false, true, false, 0);
	BUG_ON(ret != 0);

	vsgt = vmw_bo_sg_table(bo);
	vmw_piter_start(&pt_iter, vsgt, 0);
	BUG_ON(!vmw_piter_next(&pt_iter));
	mob->pt_level = 0;
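	/*
	 * Each pass builds one page table level: the pages written in
	 * the previous pass become the data pages of the next one.
	 * Once a level fits in a single page, save_pt_iter points at
	 * the root page of the finished table.
	 */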
	while (likely(num_data_pages > 1)) {
		++mob->pt_level;
		BUG_ON(mob->pt_level > 2);
		save_pt_iter = pt_iter;
		num_pt_pages = vmw_mob_build_pt(&data_iter, num_data_pages,
						&pt_iter);
		data_iter = save_pt_iter;
		num_data_pages = num_pt_pages;
	}

	mob->pt_root_page = vmw_piter_dma_addr(&save_pt_iter);
	ttm_bo_unreserve(bo);
}

/*
 * vmw_mob_destroy - Destroy a mob, unpopulating first if necessary.
 *
 * @mob: Pointer to a mob to destroy.
 */
void vmw_mob_destroy(struct vmw_mob *mob)
{
	if (mob->pt_bo)
		ttm_bo_unref(&mob->pt_bo);
	kfree(mob);
}

/*
 * vmw_mob_unbind - Hide a mob from the device.
 *
 * @dev_priv: Pointer to a device private.
 * @mob: Pointer to the mob to unbind.
 */
void vmw_mob_unbind(struct vmw_private *dev_priv,
		    struct vmw_mob *mob)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyGBMob body;
	} *cmd;
	int ret;
	struct ttm_buffer_object *bo = mob->pt_bo;

	if (bo) {
		ret = ttm_bo_reserve(bo, false, true, false, 0);
		/*
		 * No one else should be using this buffer.
		 */
		BUG_ON(ret != 0);
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for Memory "
			  "Object unbinding.\n");
	} else {
		cmd->header.id = SVGA_3D_CMD_DESTROY_GB_MOB;
		cmd->header.size = sizeof(cmd->body);
		cmd->body.mobid = mob->id;
		vmw_fifo_commit(dev_priv, sizeof(*cmd));
	}
	if (bo) {
		vmw_fence_single_bo(bo, NULL);
		ttm_bo_unreserve(bo);
	}
	vmw_3d_resource_dec(dev_priv, false);
}

/*
 * vmw_mob_bind - Make a mob visible to the device after first
 * populating it if necessary.
 *
 * @dev_priv: Pointer to a device private.
 * @mob: Pointer to the mob we're making visible.
 * @vsgt: Pointer to a struct vmw_sg_table describing the data pages of
 * the underlying buffer object.
 * @num_data_pages: Number of data pages of the underlying buffer
 * object.
 * @mob_id: Device id of the mob to bind.
 *
 * This function is intended to be interfaced with the ttm_tt backend
 * code.
 */
int vmw_mob_bind(struct vmw_private *dev_priv,
		 struct vmw_mob *mob,
		 const struct vmw_sg_table *vsgt,
		 unsigned long num_data_pages,
		 int32_t mob_id)
{
	int ret;
	bool pt_set_up = false;
	struct vmw_piter data_iter;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBMob body;
	} *cmd;

	mob->id = mob_id;
	vmw_piter_start(&data_iter, vsgt, 0);
	if (unlikely(!vmw_piter_next(&data_iter)))
		return 0;

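	/*
	 * Same format selection as in vmw_setup_otable_base(): a
	 * single page needs no indirection, a physically contiguous
	 * buffer can use SVGA3D_MOBFMT_RANGE, and anything else gets
	 * a multilevel page table.
	 */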
	if (likely(num_data_pages == 1)) {
		mob->pt_level = SVGA3D_MOBFMT_PTDEPTH_0;
		mob->pt_root_page = vmw_piter_dma_addr(&data_iter);
	} else if (vsgt->num_regions == 1) {
		mob->pt_level = SVGA3D_MOBFMT_RANGE;
		mob->pt_root_page = vmw_piter_dma_addr(&data_iter);
	} else if (unlikely(mob->pt_bo == NULL)) {
		ret = vmw_mob_pt_populate(dev_priv, mob);
		if (unlikely(ret != 0))
			return ret;

		vmw_mob_pt_setup(mob, data_iter, num_data_pages);
		pt_set_up = true;
	}

	(void) vmw_3d_resource_inc(dev_priv, false);

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for Memory "
			  "Object binding.\n");
		goto out_no_cmd_space;
	}

	cmd->header.id = SVGA_3D_CMD_DEFINE_GB_MOB;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.mobid = mob_id;
	cmd->body.ptDepth = mob->pt_level;
	cmd->body.base = mob->pt_root_page >> PAGE_SHIFT;
	cmd->body.sizeInBytes = num_data_pages * PAGE_SIZE;

	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;

out_no_cmd_space:
	vmw_3d_resource_dec(dev_priv, false);
	if (pt_set_up)
		ttm_bo_unref(&mob->pt_bo);

	return -ENOMEM;
}