drm/i915: Add a spinlock to protect the active_list
This is a baby step toward finer-grained locking than the
struct_mutex. Specifically, this will enable new debugging code to
read the active list for printing out GPU state when the GPU is
wedged (while the struct_mutex is held, of course).
Signed-off-by: Carl Worth <cworth@cworth.org>
[anholt: indentation fix]
Signed-off-by: Eric Anholt <eric@anholt.net>
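
The intended consumer is debugging code along the lines of the sketch
below. The helper name i915_gem_dump_active_list is made up for
illustration and is not part of this patch; the lock and field names
(mm.active_list_lock, mm.active_list, obj, list, last_rendering_seqno)
are the ones this diff touches.

/* Hypothetical debug helper, not part of this patch: with the new
 * spinlock, a reader can walk mm.active_list safely even while
 * another path holds struct_mutex.
 */
static void i915_gem_dump_active_list(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv;

	spin_lock(&dev_priv->mm.active_list_lock);
	list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list)
		DRM_INFO("active object %p, last seqno %d\n",
			 obj_priv->obj, obj_priv->last_rendering_seqno);
	spin_unlock(&dev_priv->mm.active_list_lock);
}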
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 9f4eceb..1449b45 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1325,8 +1325,10 @@
obj_priv->active = 1;
}
/* Move from whatever list we were on to the tail of execution. */
+ spin_lock(&dev_priv->mm.active_list_lock);
list_move_tail(&obj_priv->list,
&dev_priv->mm.active_list);
+ spin_unlock(&dev_priv->mm.active_list_lock);
obj_priv->last_rendering_seqno = seqno;
}
@@ -1468,6 +1470,7 @@
/* Move any buffers on the active list that are no longer referenced
* by the ringbuffer to the flushing/inactive lists as appropriate.
*/
+ spin_lock(&dev_priv->mm.active_list_lock);
while (!list_empty(&dev_priv->mm.active_list)) {
struct drm_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
@@ -1482,7 +1485,7 @@
* this seqno.
*/
if (obj_priv->last_rendering_seqno != request->seqno)
- return;
+ goto out;
#if WATCH_LRU
DRM_INFO("%s: retire %d moves to inactive list %p\n",
@@ -1494,6 +1497,8 @@
else
i915_gem_object_move_to_inactive(obj);
}
+out:
+ spin_unlock(&dev_priv->mm.active_list_lock);
}
/**
@@ -2215,15 +2220,20 @@
}
}
if (obj_priv->gtt_space == NULL) {
+ bool lists_empty;
+
/* If the gtt is empty and we're still having trouble
* fitting our object in, we're out of memory.
*/
#if WATCH_LRU
DRM_INFO("%s: GTT full, evicting something\n", __func__);
#endif
- if (list_empty(&dev_priv->mm.inactive_list) &&
- list_empty(&dev_priv->mm.flushing_list) &&
- list_empty(&dev_priv->mm.active_list)) {
+ spin_lock(&dev_priv->mm.active_list_lock);
+ lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
+ list_empty(&dev_priv->mm.flushing_list) &&
+ list_empty(&dev_priv->mm.active_list));
+ spin_unlock(&dev_priv->mm.active_list_lock);
+ if (lists_empty) {
DRM_ERROR("GTT full, but LRU list empty\n");
return -ENOMEM;
}
@@ -3679,6 +3689,7 @@
i915_gem_retire_requests(dev);
+ spin_lock(&dev_priv->mm.active_list_lock);
if (!dev_priv->mm.wedged) {
/* Active and flushing should now be empty as we've
* waited for a sequence higher than any pending execbuffer
@@ -3705,6 +3716,7 @@
obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
i915_gem_object_move_to_inactive(obj_priv->obj);
}
+ spin_unlock(&dev_priv->mm.active_list_lock);
while (!list_empty(&dev_priv->mm.flushing_list)) {
struct drm_i915_gem_object *obj_priv;
@@ -3953,7 +3965,10 @@
if (ret != 0)
return ret;
+ spin_lock(&dev_priv->mm.active_list_lock);
BUG_ON(!list_empty(&dev_priv->mm.active_list));
+ spin_unlock(&dev_priv->mm.active_list_lock);
+
BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
BUG_ON(!list_empty(&dev_priv->mm.request_list));
@@ -3997,6 +4012,7 @@
{
drm_i915_private_t *dev_priv = dev->dev_private;
+ spin_lock_init(&dev_priv->mm.active_list_lock);
INIT_LIST_HEAD(&dev_priv->mm.active_list);
INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
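
Not shown in this diff: the spin_lock_init() call above implies a
matching field in the mm struct in i915_drv.h. A sketch of that
declaration (its exact placement within the struct is an assumption):

	/* Protects mm.active_list; the field itself is implied by the
	 * spin_lock_init() above, its position here is assumed. */
	spinlock_t active_list_lock;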