drm/i915: Defer accounting until read from debugfs
Simply remove our accounting of objects inside the aperture, keeping
track only of what is in the aperture and its current usage. This
removes the over-complication of BUGs that were attempting to keep the
accounting correct and also removes the overhead of the accounting on
the hot-paths.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 5faae47..3c9d4b8 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -128,8 +128,15 @@
if (obj->gtt_space != NULL)
seq_printf(m, " (gtt offset: %08x, size: %08x)",
obj->gtt_offset, (unsigned int)obj->gtt_space->size);
- if (obj->pin_mappable || obj->fault_mappable)
- seq_printf(m, " (mappable)");
+ if (obj->pin_mappable || obj->fault_mappable) {
+ char s[3], *t = s;
+ if (obj->pin_mappable)
+ *t++ = 'p';
+ if (obj->fault_mappable)
+ *t++ = 'f';
+ *t = '\0';
+ seq_printf(m, " (%s mappable)", s);
+ }
if (obj->ring != NULL)
seq_printf(m, " (%s)", obj->ring->name);
}
@@ -191,28 +198,79 @@
return 0;
}
+#define count_objects(list, member) do { \
+ list_for_each_entry(obj, list, member) { \
+ size += obj->gtt_space->size; \
+ ++count; \
+ if (obj->map_and_fenceable) { \
+ mappable_size += obj->gtt_space->size; \
+ ++mappable_count; \
+ } \
+ } \
+} while (0)
+
static int i915_gem_object_info(struct seq_file *m, void* data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 count, mappable_count;
+ size_t size, mappable_size;
+ struct drm_i915_gem_object *obj;
int ret;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
- seq_printf(m, "%u objects\n", dev_priv->mm.object_count);
- seq_printf(m, "%zu object bytes\n", dev_priv->mm.object_memory);
- seq_printf(m, "%u pinned\n", dev_priv->mm.pin_count);
- seq_printf(m, "%zu pin bytes\n", dev_priv->mm.pin_memory);
- seq_printf(m, "%u mappable objects in gtt\n", dev_priv->mm.gtt_mappable_count);
- seq_printf(m, "%zu mappable gtt bytes\n", dev_priv->mm.gtt_mappable_memory);
- seq_printf(m, "%zu mappable gtt used bytes\n", dev_priv->mm.mappable_gtt_used);
- seq_printf(m, "%zu mappable gtt total\n", dev_priv->mm.mappable_gtt_total);
- seq_printf(m, "%u objects in gtt\n", dev_priv->mm.gtt_count);
- seq_printf(m, "%zu gtt bytes\n", dev_priv->mm.gtt_memory);
- seq_printf(m, "%zu gtt total\n", dev_priv->mm.gtt_total);
+ seq_printf(m, "%u objects, %zu bytes\n",
+ dev_priv->mm.object_count,
+ dev_priv->mm.object_memory);
+
+ size = count = mappable_size = mappable_count = 0;
+ count_objects(&dev_priv->mm.gtt_list, gtt_list);
+ seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
+ count, mappable_count, size, mappable_size);
+
+ size = count = mappable_size = mappable_count = 0;
+ count_objects(&dev_priv->mm.active_list, mm_list);
+ count_objects(&dev_priv->mm.flushing_list, mm_list);
+ seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n",
+ count, mappable_count, size, mappable_size);
+
+ size = count = mappable_size = mappable_count = 0;
+ count_objects(&dev_priv->mm.pinned_list, mm_list);
+ seq_printf(m, " %u [%u] pinned objects, %zu [%zu] bytes\n",
+ count, mappable_count, size, mappable_size);
+
+ size = count = mappable_size = mappable_count = 0;
+ count_objects(&dev_priv->mm.inactive_list, mm_list);
+ seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n",
+ count, mappable_count, size, mappable_size);
+
+ size = count = mappable_size = mappable_count = 0;
+ count_objects(&dev_priv->mm.deferred_free_list, mm_list);
+ seq_printf(m, " %u [%u] freed objects, %zu [%zu] bytes\n",
+ count, mappable_count, size, mappable_size);
+
+ size = count = mappable_size = mappable_count = 0;
+ list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
+ if (obj->fault_mappable) {
+ size += obj->gtt_space->size;
+ ++count;
+ }
+ if (obj->pin_mappable) {
+ mappable_size += obj->gtt_space->size;
+ ++mappable_count;
+ }
+ }
+ seq_printf(m, "%u pinned mappable objects, %zu bytes\n",
+ mappable_count, mappable_size);
+ seq_printf(m, "%u fault mappable objects, %zu bytes\n",
+ count, size);
+
+ seq_printf(m, "%zu [%zu] gtt total\n",
+ dev_priv->mm.gtt_total, dev_priv->mm.mappable_gtt_total);
mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index b6ca10a..4ad34f9 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -642,17 +642,10 @@
struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
/* accounting, useful for userland debugging */
- size_t object_memory;
- size_t pin_memory;
- size_t gtt_memory;
- size_t gtt_mappable_memory;
- size_t mappable_gtt_used;
- size_t mappable_gtt_total;
size_t gtt_total;
+ size_t mappable_gtt_total;
+ size_t object_memory;
u32 object_count;
- u32 pin_count;
- u32 gtt_mappable_count;
- u32 gtt_count;
} mm;
struct sdvo_device_mapping sdvo_mappings[2];
/* indicate whether the LVDS_BORDER should be enabled or not */
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index f6167c5..7507a86 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -83,80 +83,6 @@
dev_priv->mm.object_memory -= size;
}
-static void i915_gem_info_add_gtt(struct drm_i915_private *dev_priv,
- struct drm_i915_gem_object *obj)
-{
- dev_priv->mm.gtt_count++;
- dev_priv->mm.gtt_memory += obj->gtt_space->size;
- if (obj->gtt_offset < dev_priv->mm.gtt_mappable_end) {
- dev_priv->mm.mappable_gtt_used +=
- min_t(size_t, obj->gtt_space->size,
- dev_priv->mm.gtt_mappable_end - obj->gtt_offset);
- }
- list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list);
-}
-
-static void i915_gem_info_remove_gtt(struct drm_i915_private *dev_priv,
- struct drm_i915_gem_object *obj)
-{
- dev_priv->mm.gtt_count--;
- dev_priv->mm.gtt_memory -= obj->gtt_space->size;
- if (obj->gtt_offset < dev_priv->mm.gtt_mappable_end) {
- dev_priv->mm.mappable_gtt_used -=
- min_t(size_t, obj->gtt_space->size,
- dev_priv->mm.gtt_mappable_end - obj->gtt_offset);
- }
- list_del_init(&obj->gtt_list);
-}
-
-/**
- * Update the mappable working set counters. Call _only_ when there is a change
- * in one of (pin|fault)_mappable and update *_mappable _before_ calling.
- * @mappable: new state the changed mappable flag (either pin_ or fault_).
- */
-static void
-i915_gem_info_update_mappable(struct drm_i915_private *dev_priv,
- struct drm_i915_gem_object *obj,
- bool mappable)
-{
- if (mappable) {
- if (obj->pin_mappable && obj->fault_mappable)
- /* Combined state was already mappable. */
- return;
- dev_priv->mm.gtt_mappable_count++;
- dev_priv->mm.gtt_mappable_memory += obj->gtt_space->size;
- } else {
- if (obj->pin_mappable || obj->fault_mappable)
- /* Combined state still mappable. */
- return;
- dev_priv->mm.gtt_mappable_count--;
- dev_priv->mm.gtt_mappable_memory -= obj->gtt_space->size;
- }
-}
-
-static void i915_gem_info_add_pin(struct drm_i915_private *dev_priv,
- struct drm_i915_gem_object *obj,
- bool mappable)
-{
- dev_priv->mm.pin_count++;
- dev_priv->mm.pin_memory += obj->gtt_space->size;
- if (mappable) {
- obj->pin_mappable = true;
- i915_gem_info_update_mappable(dev_priv, obj, true);
- }
-}
-
-static void i915_gem_info_remove_pin(struct drm_i915_private *dev_priv,
- struct drm_i915_gem_object *obj)
-{
- dev_priv->mm.pin_count--;
- dev_priv->mm.pin_memory -= obj->gtt_space->size;
- if (obj->pin_mappable) {
- obj->pin_mappable = false;
- i915_gem_info_update_mappable(dev_priv, obj, false);
- }
-}
-
int
i915_gem_check_is_wedged(struct drm_device *dev)
{
@@ -253,19 +179,24 @@
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_get_aperture *args = data;
+ struct drm_i915_gem_object *obj;
+ size_t pinned;
if (!(dev->driver->driver_features & DRIVER_GEM))
return -ENODEV;
+ pinned = 0;
mutex_lock(&dev->struct_mutex);
- args->aper_size = dev_priv->mm.gtt_total;
- args->aper_available_size = args->aper_size - dev_priv->mm.pin_memory;
+ list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list)
+ pinned += obj->gtt_space->size;
mutex_unlock(&dev->struct_mutex);
+ args->aper_size = dev_priv->mm.gtt_total;
+ args->aper_available_size = args->aper_size - pinned;
+
return 0;
}
-
/**
* Creates a new mm object and returns a handle to it.
*/
@@ -1267,14 +1198,12 @@
/* Now bind it into the GTT if needed */
mutex_lock(&dev->struct_mutex);
- BUG_ON(obj->pin_count && !obj->pin_mappable);
if (!obj->map_and_fenceable) {
ret = i915_gem_object_unbind(obj);
if (ret)
goto unlock;
}
-
if (!obj->gtt_space) {
ret = i915_gem_object_bind_to_gtt(obj, 0, true);
if (ret)
@@ -1285,11 +1214,6 @@
if (ret)
goto unlock;
- if (!obj->fault_mappable) {
- obj->fault_mappable = true;
- i915_gem_info_update_mappable(dev_priv, obj, true);
- }
-
/* Need a new fence register? */
if (obj->tiling_mode != I915_TILING_NONE) {
ret = i915_gem_object_get_fence_reg(obj, true);
@@ -1300,6 +1224,8 @@
if (i915_gem_object_is_inactive(obj))
list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+ obj->fault_mappable = true;
+
pfn = ((dev->agp->base + obj->gtt_offset) >> PAGE_SHIFT) +
page_offset;
@@ -1406,18 +1332,14 @@
void
i915_gem_release_mmap(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ if (!obj->fault_mappable)
+ return;
- if (unlikely(obj->base.map_list.map && dev->dev_mapping))
- unmap_mapping_range(dev->dev_mapping,
- (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT,
- obj->base.size, 1);
+ unmap_mapping_range(obj->base.dev->dev_mapping,
+ (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT,
+ obj->base.size, 1);
- if (obj->fault_mappable) {
- obj->fault_mappable = false;
- i915_gem_info_update_mappable(dev_priv, obj, false);
- }
+ obj->fault_mappable = false;
}
static void
@@ -2221,8 +2143,6 @@
int
i915_gem_object_unbind(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
int ret = 0;
if (obj->gtt_space == NULL)
@@ -2259,10 +2179,9 @@
i915_gem_clear_fence_reg(obj);
i915_gem_gtt_unbind_object(obj);
-
i915_gem_object_put_pages_gtt(obj);
- i915_gem_info_remove_gtt(dev_priv, obj);
+ list_del_init(&obj->gtt_list);
list_del_init(&obj->mm_list);
/* Avoid an unnecessary call to unbind on rebind. */
obj->map_and_fenceable = true;
@@ -2833,11 +2752,8 @@
goto search_free;
}
- obj->gtt_offset = obj->gtt_space->start;
-
- /* keep track of bounds object by adding it to the inactive list */
+ list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list);
list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
- i915_gem_info_add_gtt(dev_priv, obj);
/* Assert that the object is not currently in any GPU domain. As it
* wasn't in the GTT, there shouldn't be any way it could have been in
@@ -2846,7 +2762,7 @@
BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
- trace_i915_gem_object_bind(obj, obj->gtt_offset, map_and_fenceable);
+ obj->gtt_offset = obj->gtt_space->start;
fenceable =
obj->gtt_space->size == fence_size &&
@@ -2857,6 +2773,7 @@
obj->map_and_fenceable = mappable && fenceable;
+ trace_i915_gem_object_bind(obj, obj->gtt_offset, map_and_fenceable);
return 0;
}
@@ -4372,12 +4289,11 @@
}
if (obj->pin_count++ == 0) {
- i915_gem_info_add_pin(dev_priv, obj, map_and_fenceable);
if (!obj->active)
list_move_tail(&obj->mm_list,
&dev_priv->mm.pinned_list);
}
- BUG_ON(!obj->pin_mappable && map_and_fenceable);
+ obj->pin_mappable |= map_and_fenceable;
WARN_ON(i915_verify_lists(dev));
return 0;
@@ -4397,7 +4313,7 @@
if (!obj->active)
list_move_tail(&obj->mm_list,
&dev_priv->mm.inactive_list);
- i915_gem_info_remove_pin(dev_priv, obj);
+ obj->pin_mappable = false;
}
WARN_ON(i915_verify_lists(dev));
}