/**************************************************************************
 *
 * Copyright © 2012-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"

/*
 * If we set up the screen target otable, screen objects stop working.
 */

#define VMW_OTABLE_SETUP_SUB ((VMWGFX_ENABLE_SCREEN_TARGET_OTABLE ? 0 : 1))

#ifdef CONFIG_64BIT
#define VMW_PPN_SIZE 8
#define VMW_MOBFMT_PTDEPTH_0 SVGA3D_MOBFMT_PTDEPTH64_0
#define VMW_MOBFMT_PTDEPTH_1 SVGA3D_MOBFMT_PTDEPTH64_1
#define VMW_MOBFMT_PTDEPTH_2 SVGA3D_MOBFMT_PTDEPTH64_2
#else
#define VMW_PPN_SIZE 4
#define VMW_MOBFMT_PTDEPTH_0 SVGA3D_MOBFMT_PTDEPTH_0
#define VMW_MOBFMT_PTDEPTH_1 SVGA3D_MOBFMT_PTDEPTH_1
#define VMW_MOBFMT_PTDEPTH_2 SVGA3D_MOBFMT_PTDEPTH_2
#endif
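
/*
 * With 4 KiB pages, one page-table page holds PAGE_SIZE / VMW_PPN_SIZE
 * entries: 512 with 64-bit PPNs, 1024 with 32-bit ones. Each extra
 * page-table level therefore multiplies the mappable size by that
 * factor.
 */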

/*
 * struct vmw_mob - Structure containing page table and metadata for a
 * Guest Memory OBject.
 *
 * @pt_bo: Buffer object holding the page table pages.
 * @num_pages: Number of pages that make up the page table.
 * @pt_level: The indirection level of the page table. 0-2.
 * @pt_root_page: DMA address of the level 0 page of the page table.
 * @id: Device id of the mob.
 */
struct vmw_mob {
        struct ttm_buffer_object *pt_bo;
        unsigned long num_pages;
        unsigned pt_level;
        dma_addr_t pt_root_page;
        uint32_t id;
};

/*
 * struct vmw_otable - Guest Memory OBject table metadata
 *
 * @size: Size of the table (page-aligned).
 * @page_table: Pointer to a struct vmw_mob holding the page table.
 * @enabled: Whether the table is (to be) set up on the device. The
 * initializers below follow this member order.
 */
static const struct vmw_otable pre_dx_tables[] = {
        {VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE, NULL, true},
        {VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE, NULL, true},
        {VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE, NULL, true},
        {VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE, NULL, true},
        {VMWGFX_NUM_GB_SCREEN_TARGET * SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE,
         NULL, VMWGFX_ENABLE_SCREEN_TARGET_OTABLE}
};

static const struct vmw_otable dx_tables[] = {
        {VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE, NULL, true},
        {VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE, NULL, true},
        {VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE, NULL, true},
        {VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE, NULL, true},
        {VMWGFX_NUM_GB_SCREEN_TARGET * SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE,
         NULL, VMWGFX_ENABLE_SCREEN_TARGET_OTABLE},
        {VMWGFX_NUM_DXCONTEXT * sizeof(SVGAOTableDXContextEntry), NULL, true},
};

static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
                               struct vmw_mob *mob);
static void vmw_mob_pt_setup(struct vmw_mob *mob,
                             struct vmw_piter data_iter,
                             unsigned long num_data_pages);

/*
 * vmw_setup_otable_base - Issue an object table base setup command to
 * the device
 *
 * @dev_priv: Pointer to a device private structure
 * @type: Type of object table base
 * @otable_bo: Buffer object holding the object tables
 * @offset: Start of table offset into @otable_bo
 * @otable: Pointer to otable metadata
 *
 * This function returns -ENOMEM if it fails to reserve fifo space,
 * and may block waiting for fifo space.
 */
static int vmw_setup_otable_base(struct vmw_private *dev_priv,
                                 SVGAOTableType type,
                                 struct ttm_buffer_object *otable_bo,
                                 unsigned long offset,
                                 struct vmw_otable *otable)
{
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdSetOTableBase64 body;
        } *cmd;
        struct vmw_mob *mob;
        const struct vmw_sg_table *vsgt;
        struct vmw_piter iter;
        int ret;

        BUG_ON(otable->page_table != NULL);

        vsgt = vmw_bo_sg_table(otable_bo);
        vmw_piter_start(&iter, vsgt, offset >> PAGE_SHIFT);
        WARN_ON(!vmw_piter_next(&iter));

        mob = vmw_mob_create(otable->size >> PAGE_SHIFT);
        if (unlikely(mob == NULL)) {
                DRM_ERROR("Failed creating OTable page table.\n");
                return -ENOMEM;
        }

        if (otable->size <= PAGE_SIZE) {
                mob->pt_level = VMW_MOBFMT_PTDEPTH_0;
                mob->pt_root_page = vmw_piter_dma_addr(&iter);
        } else if (vsgt->num_regions == 1) {
                mob->pt_level = SVGA3D_MOBFMT_RANGE;
                mob->pt_root_page = vmw_piter_dma_addr(&iter);
        } else {
                ret = vmw_mob_pt_populate(dev_priv, mob);
                if (unlikely(ret != 0))
                        goto out_no_populate;

                vmw_mob_pt_setup(mob, iter, otable->size >> PAGE_SHIFT);
                mob->pt_level += VMW_MOBFMT_PTDEPTH_1 - SVGA3D_MOBFMT_PTDEPTH_1;
        }

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for OTable setup.\n");
                ret = -ENOMEM;
                goto out_no_fifo;
        }

        memset(cmd, 0, sizeof(*cmd));
        cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE64;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.type = type;
        cmd->body.baseAddress = mob->pt_root_page >> PAGE_SHIFT;
        cmd->body.sizeInBytes = otable->size;
        cmd->body.validSizeInBytes = 0;
        cmd->body.ptDepth = mob->pt_level;

        /*
         * The device doesn't support this, but the otable size is
         * determined at compile time, so this BUG shouldn't trigger
         * randomly.
         */
        BUG_ON(mob->pt_level == VMW_MOBFMT_PTDEPTH_2);

        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        otable->page_table = mob;

        return 0;

out_no_fifo:
out_no_populate:
        vmw_mob_destroy(mob);
        return ret;
}
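
/*
 * Note the three MOB formats used above, mirrored in vmw_mob_bind():
 * a table that fits in one page uses VMW_MOBFMT_PTDEPTH_0 with that
 * page as root, a physically contiguous table (vsgt->num_regions == 1)
 * uses SVGA3D_MOBFMT_RANGE with its first page as base address, and
 * only a fragmented multi-page table needs a real page-table
 * hierarchy.
 */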

/*
 * vmw_takedown_otable_base - Issue an object table base takedown command
 * to the device
 *
 * @dev_priv: Pointer to a device private structure
 * @type: Type of object table base
 * @otable: Pointer to otable metadata
 */
static void vmw_takedown_otable_base(struct vmw_private *dev_priv,
                                     SVGAOTableType type,
                                     struct vmw_otable *otable)
{
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdSetOTableBase body;
        } *cmd;
        struct ttm_buffer_object *bo;

        if (otable->page_table == NULL)
                return;

        bo = otable->page_table->pt_bo;
        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for OTable "
                          "takedown.\n");
                return;
        }

        memset(cmd, 0, sizeof(*cmd));
        cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.type = type;
        cmd->body.baseAddress = 0;
        cmd->body.sizeInBytes = 0;
        cmd->body.validSizeInBytes = 0;
        cmd->body.ptDepth = SVGA3D_MOBFMT_INVALID;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));

        if (bo) {
                int ret;

                ret = ttm_bo_reserve(bo, false, true, false, NULL);
                BUG_ON(ret != 0);

                vmw_fence_single_bo(bo, NULL);
                ttm_bo_unreserve(bo);
        }

        vmw_mob_destroy(otable->page_table);
        otable->page_table = NULL;
}

static int vmw_otable_batch_setup(struct vmw_private *dev_priv,
                                  struct vmw_otable_batch *batch)
{
        unsigned long offset;
        unsigned long bo_size;
        struct vmw_otable *otables = batch->otables;
        SVGAOTableType i;
        int ret;

        bo_size = 0;
        for (i = 0; i < batch->num_otables; ++i) {
                if (!otables[i].enabled)
                        continue;

                otables[i].size =
                        (otables[i].size + PAGE_SIZE - 1) & PAGE_MASK;
                bo_size += otables[i].size;
        }

        ret = ttm_bo_create(&dev_priv->bdev, bo_size,
                            ttm_bo_type_device,
                            &vmw_sys_ne_placement,
                            0, false, NULL,
                            &batch->otable_bo);

        if (unlikely(ret != 0))
                goto out_no_bo;

        ret = ttm_bo_reserve(batch->otable_bo, false, true, false, NULL);
        BUG_ON(ret != 0);
        ret = vmw_bo_driver.ttm_tt_populate(batch->otable_bo->ttm);
        if (unlikely(ret != 0))
                goto out_unreserve;
        ret = vmw_bo_map_dma(batch->otable_bo);
        if (unlikely(ret != 0))
                goto out_unreserve;

        ttm_bo_unreserve(batch->otable_bo);

        offset = 0;
        for (i = 0; i < batch->num_otables; ++i) {
                if (!batch->otables[i].enabled)
                        continue;

                ret = vmw_setup_otable_base(dev_priv, i, batch->otable_bo,
                                            offset,
                                            &otables[i]);
                if (unlikely(ret != 0))
                        goto out_no_setup;
                offset += otables[i].size;
        }

        return 0;

out_unreserve:
        ttm_bo_unreserve(batch->otable_bo);
out_no_setup:
        for (i = 0; i < batch->num_otables; ++i) {
                if (batch->otables[i].enabled)
                        vmw_takedown_otable_base(dev_priv, i,
                                                 &batch->otables[i]);
        }

        ttm_bo_unref(&batch->otable_bo);
out_no_bo:
        return ret;
}
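
/*
 * All enabled otables share a single buffer object: each table size is
 * rounded up to a page boundary and the tables are laid out back to
 * back, so vmw_setup_otable_base() above receives a page-aligned
 * offset into batch->otable_bo for each table.
 */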

/*
 * vmw_otables_setup - Set up guest backed memory object tables
 *
 * @dev_priv: Pointer to a device private structure
 *
 * Takes care of the device guest backed surface initialization by
 * setting up the guest backed memory object tables.
 * Returns 0 on success and various error codes on failure. A successful
 * return means the object tables can be taken down using the
 * vmw_otables_takedown function.
 */
int vmw_otables_setup(struct vmw_private *dev_priv)
{
        struct vmw_otable **otables = &dev_priv->otable_batch.otables;
        int ret;

        if (dev_priv->has_dx) {
                *otables = kmalloc(sizeof(dx_tables), GFP_KERNEL);
                if (*otables == NULL)
                        return -ENOMEM;

                memcpy(*otables, dx_tables, sizeof(dx_tables));
                dev_priv->otable_batch.num_otables = ARRAY_SIZE(dx_tables);
        } else {
                *otables = kmalloc(sizeof(pre_dx_tables), GFP_KERNEL);
                if (*otables == NULL)
                        return -ENOMEM;

                memcpy(*otables, pre_dx_tables, sizeof(pre_dx_tables));
                dev_priv->otable_batch.num_otables = ARRAY_SIZE(pre_dx_tables);
        }

        ret = vmw_otable_batch_setup(dev_priv, &dev_priv->otable_batch);
        if (unlikely(ret != 0))
                goto out_setup;

        return 0;

out_setup:
        kfree(*otables);
        return ret;
}

static void vmw_otable_batch_takedown(struct vmw_private *dev_priv,
                                      struct vmw_otable_batch *batch)
{
        SVGAOTableType i;
        struct ttm_buffer_object *bo = batch->otable_bo;
        int ret;

        for (i = 0; i < batch->num_otables; ++i)
                if (batch->otables[i].enabled)
                        vmw_takedown_otable_base(dev_priv, i,
                                                 &batch->otables[i]);

        ret = ttm_bo_reserve(bo, false, true, false, NULL);
        BUG_ON(ret != 0);

        vmw_fence_single_bo(bo, NULL);
        ttm_bo_unreserve(bo);

        ttm_bo_unref(&batch->otable_bo);
}

/*
 * vmw_otables_takedown - Take down guest backed memory object tables
 *
 * @dev_priv: Pointer to a device private structure
 *
 * Take down the Guest Memory Object tables.
 */
void vmw_otables_takedown(struct vmw_private *dev_priv)
{
        vmw_otable_batch_takedown(dev_priv, &dev_priv->otable_batch);
        kfree(dev_priv->otable_batch.otables);
}

/*
 * vmw_mob_calculate_pt_pages - Calculate the number of page table pages
 * needed for a guest backed memory object.
 *
 * @data_pages: Number of data pages in the memory object buffer.
 */
static unsigned long vmw_mob_calculate_pt_pages(unsigned long data_pages)
{
        unsigned long data_size = data_pages * PAGE_SIZE;
        unsigned long tot_size = 0;

        while (likely(data_size > PAGE_SIZE)) {
                data_size = DIV_ROUND_UP(data_size, PAGE_SIZE);
                data_size *= VMW_PPN_SIZE;
                tot_size += (data_size + PAGE_SIZE - 1) & PAGE_MASK;
        }

        return tot_size >> PAGE_SHIFT;
}
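
/*
 * Worked example, assuming 4 KiB pages and 64-bit PPNs (VMW_PPN_SIZE
 * of 8): a 16 MiB buffer has 4096 data pages. The first loop pass
 * needs 4096 * 8 = 32768 bytes (8 pages) of level-1 entries, the
 * second needs 8 * 8 = 64 bytes, rounded up to one level-2 page, and
 * the loop then stops, returning 9 page-table pages in total.
 */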

/*
 * vmw_mob_create - Create a mob, but don't populate it.
 *
 * @data_pages: Number of data pages of the underlying buffer object.
 */
struct vmw_mob *vmw_mob_create(unsigned long data_pages)
{
        struct vmw_mob *mob = kzalloc(sizeof(*mob), GFP_KERNEL);

        if (unlikely(mob == NULL))
                return NULL;

        mob->num_pages = vmw_mob_calculate_pt_pages(data_pages);

        return mob;
}

/*
 * vmw_mob_pt_populate - Populate the mob pagetable
 *
 * @dev_priv: Pointer to a device private.
 * @mob: Pointer to the mob whose pagetable we want to populate.
 *
 * This function allocates memory to be used for the pagetable, and
 * adjusts TTM memory accounting accordingly. Returns -ENOMEM if
 * memory resources aren't sufficient; through the TTM memory
 * accounting it may cause other TTM buffer objects to be swapped out.
 */
static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
                               struct vmw_mob *mob)
{
        int ret;

        BUG_ON(mob->pt_bo != NULL);

        ret = ttm_bo_create(&dev_priv->bdev, mob->num_pages * PAGE_SIZE,
                            ttm_bo_type_device,
                            &vmw_sys_ne_placement,
                            0, false, NULL, &mob->pt_bo);
        if (unlikely(ret != 0))
                return ret;

        ret = ttm_bo_reserve(mob->pt_bo, false, true, false, NULL);

        BUG_ON(ret != 0);
        ret = vmw_bo_driver.ttm_tt_populate(mob->pt_bo->ttm);
        if (unlikely(ret != 0))
                goto out_unreserve;
        ret = vmw_bo_map_dma(mob->pt_bo);
        if (unlikely(ret != 0))
                goto out_unreserve;

        ttm_bo_unreserve(mob->pt_bo);

        return 0;

out_unreserve:
        ttm_bo_unreserve(mob->pt_bo);
        ttm_bo_unref(&mob->pt_bo);

        return ret;
}

/**
 * vmw_mob_assign_ppn - Assign a value to a page table entry
 *
 * @addr: Pointer to pointer to page table entry.
 * @val: The DMA address of the page backing the page table entry.
 *
 * Assigns a value to a page table entry pointed to by *@addr and increments
 * *@addr according to the page table entry size.
 */
#if (VMW_PPN_SIZE == 8)
static void vmw_mob_assign_ppn(u32 **addr, dma_addr_t val)
{
        *((u64 *) *addr) = val >> PAGE_SHIFT;
        *addr += 2;
}
#else
static void vmw_mob_assign_ppn(u32 **addr, dma_addr_t val)
{
        *(*addr)++ = val >> PAGE_SHIFT;
}
#endif
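
/*
 * With 4 KiB pages, a data page at DMA address 0x12345000 is stored as
 * PPN 0x12345; in the 64-bit case the entry occupies two u32 slots,
 * which is why *addr advances by two.
 */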

/*
 * vmw_mob_build_pt - Build a pagetable
 *
 * @data_iter: Piter pointing to the DMA addresses of the underlying
 * buffer object's data pages.
 * @num_data_pages: Number of buffer object data pages.
 * @pt_iter: Piter pointing to the page table pages.
 *
 * Returns the number of page table pages actually used.
 * Uses atomic kmaps of highmem pages to avoid TLB thrashing.
 */
static unsigned long vmw_mob_build_pt(struct vmw_piter *data_iter,
                                      unsigned long num_data_pages,
                                      struct vmw_piter *pt_iter)
{
        unsigned long pt_size = num_data_pages * VMW_PPN_SIZE;
        unsigned long num_pt_pages = DIV_ROUND_UP(pt_size, PAGE_SIZE);
        unsigned long pt_page;
        u32 *addr, *save_addr;
        unsigned long i;
        struct page *page;

        for (pt_page = 0; pt_page < num_pt_pages; ++pt_page) {
                page = vmw_piter_page(pt_iter);

                save_addr = addr = kmap_atomic(page);

                for (i = 0; i < PAGE_SIZE / VMW_PPN_SIZE; ++i) {
                        vmw_mob_assign_ppn(&addr,
                                           vmw_piter_dma_addr(data_iter));
                        if (unlikely(--num_data_pages == 0))
                                break;
                        WARN_ON(!vmw_piter_next(data_iter));
                }
                kunmap_atomic(save_addr);
                vmw_piter_next(pt_iter);
        }

        return num_pt_pages;
}

/*
 * vmw_mob_pt_setup - Set up a multilevel mob pagetable
 *
 * @mob: Pointer to a mob whose page table needs setting up.
 * @data_iter: Piter pointing to the DMA addresses of the buffer
 * object's data pages.
 * @num_data_pages: Number of buffer object data pages.
 *
 * Builds the page table bottom-up, feeding each level's page-table
 * pages back in as the next level's data pages.
 */
static void vmw_mob_pt_setup(struct vmw_mob *mob,
                             struct vmw_piter data_iter,
                             unsigned long num_data_pages)
{
        unsigned long num_pt_pages = 0;
        struct ttm_buffer_object *bo = mob->pt_bo;
        struct vmw_piter save_pt_iter;
        struct vmw_piter pt_iter;
        const struct vmw_sg_table *vsgt;
        int ret;

        ret = ttm_bo_reserve(bo, false, true, false, NULL);
        BUG_ON(ret != 0);

        vsgt = vmw_bo_sg_table(bo);
        vmw_piter_start(&pt_iter, vsgt, 0);
        BUG_ON(!vmw_piter_next(&pt_iter));
        mob->pt_level = 0;
        while (likely(num_data_pages > 1)) {
                ++mob->pt_level;
                BUG_ON(mob->pt_level > 2);
                save_pt_iter = pt_iter;
                num_pt_pages = vmw_mob_build_pt(&data_iter, num_data_pages,
                                                &pt_iter);
                data_iter = save_pt_iter;
                num_data_pages = num_pt_pages;
        }

        mob->pt_root_page = vmw_piter_dma_addr(&save_pt_iter);
        ttm_bo_unreserve(bo);
}
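
/*
 * Continuing the worked example above (4096 data pages, 64-bit PPNs):
 * the first loop pass writes 8 level-1 pages, the second maps those 8
 * pages with a single level-2 page, after which num_data_pages == 1
 * and the loop exits with pt_level == 2 and the level-2 page left as
 * pt_root_page.
 */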

/*
 * vmw_mob_destroy - Destroy a mob, unpopulating first if necessary.
 *
 * @mob: Pointer to a mob to destroy.
 */
void vmw_mob_destroy(struct vmw_mob *mob)
{
        if (mob->pt_bo)
                ttm_bo_unref(&mob->pt_bo);
        kfree(mob);
}

/*
 * vmw_mob_unbind - Hide a mob from the device.
 *
 * @dev_priv: Pointer to a device private.
 * @mob: Pointer to the mob to unbind.
 */
void vmw_mob_unbind(struct vmw_private *dev_priv,
                    struct vmw_mob *mob)
{
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDestroyGBMob body;
        } *cmd;
        int ret;
        struct ttm_buffer_object *bo = mob->pt_bo;

        if (bo) {
                ret = ttm_bo_reserve(bo, false, true, false, NULL);
                /*
                 * No one else should be using this buffer.
                 */
                BUG_ON(ret != 0);
        }

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for Memory "
                          "Object unbinding.\n");
        } else {
                cmd->header.id = SVGA_3D_CMD_DESTROY_GB_MOB;
                cmd->header.size = sizeof(cmd->body);
                cmd->body.mobid = mob->id;
                vmw_fifo_commit(dev_priv, sizeof(*cmd));
        }
        if (bo) {
                vmw_fence_single_bo(bo, NULL);
                ttm_bo_unreserve(bo);
        }
        vmw_fifo_resource_dec(dev_priv);
}

/*
 * vmw_mob_bind - Make a mob visible to the device after first
 * populating it if necessary.
 *
 * @dev_priv: Pointer to a device private.
 * @mob: Pointer to the mob we're making visible.
 * @vsgt: Pointer to a struct vmw_sg_table describing the data pages of
 * the underlying buffer object.
 * @num_data_pages: Number of data pages of the underlying buffer
 * object.
 * @mob_id: Device id of the mob to bind.
 *
 * This function is intended to be interfaced with the ttm_tt backend
 * code.
 */
int vmw_mob_bind(struct vmw_private *dev_priv,
                 struct vmw_mob *mob,
                 const struct vmw_sg_table *vsgt,
                 unsigned long num_data_pages,
                 int32_t mob_id)
{
        int ret;
        bool pt_set_up = false;
        struct vmw_piter data_iter;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDefineGBMob64 body;
        } *cmd;

        mob->id = mob_id;
        vmw_piter_start(&data_iter, vsgt, 0);
        if (unlikely(!vmw_piter_next(&data_iter)))
                return 0;

        if (likely(num_data_pages == 1)) {
                mob->pt_level = VMW_MOBFMT_PTDEPTH_0;
                mob->pt_root_page = vmw_piter_dma_addr(&data_iter);
        } else if (vsgt->num_regions == 1) {
                mob->pt_level = SVGA3D_MOBFMT_RANGE;
                mob->pt_root_page = vmw_piter_dma_addr(&data_iter);
        } else if (unlikely(mob->pt_bo == NULL)) {
                ret = vmw_mob_pt_populate(dev_priv, mob);
                if (unlikely(ret != 0))
                        return ret;

                vmw_mob_pt_setup(mob, data_iter, num_data_pages);
                pt_set_up = true;
                mob->pt_level += VMW_MOBFMT_PTDEPTH_1 - SVGA3D_MOBFMT_PTDEPTH_1;
        }

        vmw_fifo_resource_inc(dev_priv);

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for Memory "
                          "Object binding.\n");
                goto out_no_cmd_space;
        }

        cmd->header.id = SVGA_3D_CMD_DEFINE_GB_MOB64;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.mobid = mob_id;
        cmd->body.ptDepth = mob->pt_level;
        cmd->body.base = mob->pt_root_page >> PAGE_SHIFT;
        cmd->body.sizeInBytes = num_data_pages * PAGE_SIZE;

        vmw_fifo_commit(dev_priv, sizeof(*cmd));

        return 0;

out_no_cmd_space:
        vmw_fifo_resource_dec(dev_priv);
        if (pt_set_up)
                ttm_bo_unref(&mob->pt_bo);

        return -ENOMEM;
}
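
/*
 * Typical lifecycle, sketched from the functions above (the ttm_tt
 * backend code is the intended caller; this is an illustration, not a
 * new API):
 *
 *      struct vmw_mob *mob = vmw_mob_create(num_data_pages);
 *      ret = vmw_mob_bind(dev_priv, mob, vsgt, num_data_pages, mob_id);
 *      ...
 *      vmw_mob_unbind(dev_priv, mob);
 *      vmw_mob_destroy(mob);
 */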