drm/nouveau: Take fence spinlock before reading the last sequence.
It fixes a race between the TTM delayed work queue and the GEM IOCTLs
(fdo bug 29583) uncovered by the BKL removal.
Signed-off-by: Francisco Jerez <currojerez@riseup.net>
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
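
Note: the race is a check-then-act pattern: the last completed sequence is read
and compared against chan->fence.sequence_ack before the fence spinlock is
taken, so the TTM delayed workqueue and a GEM ioctl path can both observe the
same value and one of them skips work the other depends on. The following is a
minimal user-space sketch of the fixed ordering only; it uses hypothetical names
(fake_channel, fence_update) and a pthread mutex in place of the kernel
spinlock, and is not the driver's code.

/*
 * Minimal user-space sketch of the locking pattern this patch applies
 * (hypothetical names, pthread mutex instead of the kernel spinlock):
 * the sequence read and the sequence_ack check happen under the lock,
 * so two concurrent callers cannot both act on a stale sequence_ack.
 * Build with: cc -pthread -o sketch sketch.c
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct fake_channel {
	pthread_mutex_t fence_lock;	/* stands in for chan->fence.lock */
	uint32_t sequence_ack;		/* last sequence already processed */
	uint32_t hw_sequence;		/* stands in for nvchan_rd32(chan, 0x48) */
};

static void fence_update(struct fake_channel *chan)
{
	uint32_t sequence;

	pthread_mutex_lock(&chan->fence_lock);

	/* Read and check-then-act under the lock, as in the patch below. */
	sequence = chan->hw_sequence;
	if (chan->sequence_ack == sequence)
		goto out;
	chan->sequence_ack = sequence;

	/* ...walk and signal the pending fences here... */
out:
	pthread_mutex_unlock(&chan->fence_lock);
}

int main(void)
{
	struct fake_channel chan = {
		.fence_lock = PTHREAD_MUTEX_INITIALIZER,
		.sequence_ack = 0,
		.hw_sequence = 42,
	};

	fence_update(&chan);
	printf("sequence_ack = %u\n", (unsigned)chan.sequence_ack);
	return 0;
}

The hunks below establish the same ordering inside the driver: the lock is taken
before the sequence read, and the early return becomes a goto so the lock is
released on every exit path.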
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 6b208ff..87ac21e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -64,16 +64,17 @@ nouveau_fence_update(struct nouveau_channel *chan)
 	struct nouveau_fence *fence;
 	uint32_t sequence;
 
+	spin_lock(&chan->fence.lock);
+
 	if (USE_REFCNT)
 		sequence = nvchan_rd32(chan, 0x48);
 	else
 		sequence = atomic_read(&chan->fence.last_sequence_irq);
 
 	if (chan->fence.sequence_ack == sequence)
-		return;
+		goto out;
 	chan->fence.sequence_ack = sequence;
 
-	spin_lock(&chan->fence.lock);
 	list_for_each_safe(entry, tmp, &chan->fence.pending) {
 		fence = list_entry(entry, struct nouveau_fence, entry);
 
@@ -85,6 +86,7 @@ nouveau_fence_update(struct nouveau_channel *chan)
 		if (sequence == chan->fence.sequence_ack)
 			break;
 	}
+out:
 	spin_unlock(&chan->fence.lock);
 }