drm/i915: Create VMAs
Formerly: "drm/i915: Create VMAs (part 1)"
In a previous patch, the notion of a VM was introduced. A VMA describes
an area of a VM's address space, and is similar to the equivalent concept
in the Linux mm. However, instead of representing regular memory, a VMA
is backed by a GEM BO. There may be many VMAs for a given object, one
for each VM the object is to be used in. This may occur through flink,
dma-buf, or a number of other transient states.
Currently the code depends on only 1 VMA per object, for the global GTT
(and aliasing PPGTT). The following patches will address this and make
the rest of the infrastructure more suited to handling multiple VMAs per
object.
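For reference, the VMA structure the code below manipulates looks
roughly like the following. This is a sketch reconstructed from the
fields this diff touches (node, obj, vm, vma_link), not a verbatim copy
of the header change, which is outside the i915_gem.c diff shown here:

    struct i915_vma {
            struct drm_mm_node node;           /* GTT space allocated for this binding */
            struct drm_i915_gem_object *obj;   /* the GEM BO backing this VMA */
            struct i915_address_space *vm;     /* the VM the BO is mapped into */
            struct list_head vma_link;         /* link in obj->vma_list */
    };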
v2: s/i915_obj/i915_gem_obj (Chris)
v3: Only move an object to the now-global unbound list if there are no
more VMAs for the object which are bound into a VM (i.e. the list is
empty).
v4: Killed obj->gtt_space
Some reworks due to rebase.
v5: Free vma on error path (Imre)
v6: Another missed vma free in i915_gem_object_bind_to_gtt error path
(Imre)
Fixed vma freeing in stolen preallocation (Imre)
Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
Reviewed-by: Imre Deak <imre.deak@intel.com>
[danvet: Squash in fixup from Ben to not deref a non-existing vma in
set_cache_level, reported by Chris.]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 8830856..bd8c008 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2587,6 +2587,7 @@
i915_gem_object_unbind(struct drm_i915_gem_object *obj)
{
drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
+ struct i915_vma *vma;
int ret;
if (!i915_gem_obj_ggtt_bound(obj))
@@ -2624,11 +2625,20 @@
i915_gem_object_unpin_pages(obj);
list_del(&obj->mm_list);
- list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
/* Avoid an unnecessary call to unbind on rebind. */
obj->map_and_fenceable = true;
- drm_mm_remove_node(&obj->gtt_space);
+ vma = __i915_gem_obj_to_vma(obj);
+ list_del(&vma->vma_link);
+ drm_mm_remove_node(&vma->node);
+ i915_gem_vma_destroy(vma);
+
+ /* Since the unbound list is global, only move to that list if
+ * no more VMAs exist.
+ * NB: Until we have real VMAs there will only ever be one. */
+ WARN_ON(!list_empty(&obj->vma_list));
+ if (list_empty(&obj->vma_list))
+ list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
return 0;
}
@@ -3079,8 +3089,12 @@
bool mappable, fenceable;
size_t gtt_max = map_and_fenceable ?
dev_priv->gtt.mappable_end : dev_priv->gtt.base.total;
+ struct i915_vma *vma;
int ret;
+ if (WARN_ON(!list_empty(&obj->vma_list)))
+ return -EBUSY;
+
fence_size = i915_gem_get_gtt_size(dev,
obj->base.size,
obj->tiling_mode);
@@ -3119,9 +3133,15 @@
i915_gem_object_pin_pages(obj);
+ vma = i915_gem_vma_create(obj, &dev_priv->gtt.base);
+ if (IS_ERR(vma)) {
+ i915_gem_object_unpin_pages(obj);
+ return PTR_ERR(vma);
+ }
+
search_free:
ret = drm_mm_insert_node_in_range_generic(&dev_priv->gtt.base.mm,
- &obj->gtt_space,
+ &vma->node,
size, alignment,
obj->cache_level, 0, gtt_max);
if (ret) {
@@ -3132,25 +3152,21 @@
if (ret == 0)
goto search_free;
- i915_gem_object_unpin_pages(obj);
- return ret;
+ goto err_out;
}
- if (WARN_ON(!i915_gem_valid_gtt_space(dev, &obj->gtt_space,
+ if (WARN_ON(!i915_gem_valid_gtt_space(dev, &vma->node,
obj->cache_level))) {
- i915_gem_object_unpin_pages(obj);
- drm_mm_remove_node(&obj->gtt_space);
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_out;
}
ret = i915_gem_gtt_prepare_object(obj);
- if (ret) {
- i915_gem_object_unpin_pages(obj);
- drm_mm_remove_node(&obj->gtt_space);
- return ret;
- }
+ if (ret)
+ goto err_out;
list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
list_add_tail(&obj->mm_list, &vm->inactive_list);
+ list_add(&vma->vma_link, &obj->vma_list);
fenceable =
i915_gem_obj_ggtt_size(obj) == fence_size &&
@@ -3164,6 +3180,12 @@
trace_i915_gem_object_bind(obj, map_and_fenceable);
i915_gem_verify_gtt(dev);
return 0;
+
+err_out:
+ if (drm_mm_node_allocated(&vma->node))
+ drm_mm_remove_node(&vma->node);
+ i915_gem_vma_destroy(vma);
+ i915_gem_object_unpin_pages(obj);
+ return ret;
}
void
@@ -3309,6 +3331,7 @@
{
struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct i915_vma *vma = __i915_gem_obj_to_vma(obj);
int ret;
if (obj->cache_level == cache_level)
@@ -3319,7 +3342,7 @@
return -EBUSY;
}
- if (!i915_gem_valid_gtt_space(dev, &obj->gtt_space, cache_level)) {
+ if (vma && !i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) {
ret = i915_gem_object_unbind(obj);
if (ret)
return ret;
@@ -3864,6 +3887,7 @@
INIT_LIST_HEAD(&obj->global_list);
INIT_LIST_HEAD(&obj->ring_list);
INIT_LIST_HEAD(&obj->exec_list);
+ INIT_LIST_HEAD(&obj->vma_list);
obj->ops = ops;
@@ -3984,6 +4008,26 @@
i915_gem_object_free(obj);
}
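+/*
+ * Create a VMA linking @obj into @vm. The caller is responsible for
+ * inserting vma->node into the VM's drm_mm and for adding the VMA to
+ * the object's vma_list.
+ */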
+struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm)
+{
+ struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
+ if (vma == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&vma->vma_link);
+ vma->vm = vm;
+ vma->obj = obj;
+
+ return vma;
+}
+
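+/* Destroy a VMA. The caller must already have removed vma->node from
+ * the drm_mm, hence the WARN_ON below. */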
+void i915_gem_vma_destroy(struct i915_vma *vma)
+{
+ WARN_ON(vma->node.allocated);
+ kfree(vma);
+}
+
int
i915_gem_idle(struct drm_device *dev)
{