drm/radeon: apply Murphy's law to the kms irq code v3

1. It is dangerous to have more than one spinlock
   protecting the same information, so replace
   irq.sw_lock and the per-crtc irq.pflip_lock[]
   array with a single irq.lock.

2. radeon_irq_set was sometimes called without lock
   protection, so more than one CPU could program
   the irq registers at the same time. Every update
   of the irq state now happens under the lock (see
   the sketch after this list).

3. The pm.gui_idle variable assumed that the 3D
   engine could not go idle between reading the
   status register and setting the variable. Remove
   the variable and test the register directly
   instead.
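
All of the helpers converted below share one locking
pattern. As an illustrative sketch (not part of the
diff; the sw_int case from the patch, refcounting
omitted):

   unsigned long irqflags;

   spin_lock_irqsave(&rdev->irq.lock, irqflags);
   rdev->irq.sw_int[ring] = true;  /* update the shared irq state */
   radeon_irq_set(rdev);           /* program the registers under the lock */
   spin_unlock_irqrestore(&rdev->irq.lock, irqflags);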

v2: Also handle the hpd irq code the same way.
v3: Rename the hpd parameter for clarity.

Signed-off-by: Christian Koenig <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 5df58d1..c5eb7a1 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -32,6 +32,8 @@
 #include "radeon.h"
 #include "atom.h"
 
+#define RADEON_WAIT_IDLE_TIMEOUT 200 /* in msecs */
+
 irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS)
 {
 	struct drm_device *dev = (struct drm_device *) arg;
@@ -62,8 +64,10 @@
 void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
 {
 	struct radeon_device *rdev = dev->dev_private;
+	unsigned long irqflags;
 	unsigned i;
 
+	spin_lock_irqsave(&rdev->irq.lock, irqflags);
 	/* Disable *all* interrupts */
 	for (i = 0; i < RADEON_NUM_RINGS; i++)
 		rdev->irq.sw_int[i] = false;
@@ -76,6 +80,7 @@
 		rdev->irq.afmt[i] = false;
 	}
 	radeon_irq_set(rdev);
+	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
 	/* Clear bits */
 	radeon_irq_process(rdev);
 }
@@ -83,23 +88,28 @@
 int radeon_driver_irq_postinstall_kms(struct drm_device *dev)
 {
 	struct radeon_device *rdev = dev->dev_private;
+	unsigned long irqflags;
 	unsigned i;
 
 	dev->max_vblank_count = 0x001fffff;
+	spin_lock_irqsave(&rdev->irq.lock, irqflags);
 	for (i = 0; i < RADEON_NUM_RINGS; i++)
 		rdev->irq.sw_int[i] = true;
 	radeon_irq_set(rdev);
+	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
 	return 0;
 }
 
 void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
 {
 	struct radeon_device *rdev = dev->dev_private;
+	unsigned long irqflags;
 	unsigned i;
 
 	if (rdev == NULL) {
 		return;
 	}
+	spin_lock_irqsave(&rdev->irq.lock, irqflags);
 	/* Disable *all* interrupts */
 	for (i = 0; i < RADEON_NUM_RINGS; i++)
 		rdev->irq.sw_int[i] = false;
@@ -112,6 +122,7 @@
 		rdev->irq.afmt[i] = false;
 	}
 	radeon_irq_set(rdev);
+	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
 }
 
 static bool radeon_msi_ok(struct radeon_device *rdev)
@@ -168,15 +179,12 @@
 
 int radeon_irq_kms_init(struct radeon_device *rdev)
 {
-	int i;
 	int r = 0;
 
 	INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
 	INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi);
 
-	spin_lock_init(&rdev->irq.sw_lock);
-	for (i = 0; i < rdev->num_crtc; i++)
-		spin_lock_init(&rdev->irq.pflip_lock[i]);
+	spin_lock_init(&rdev->irq.lock);
 	r = drm_vblank_init(rdev->ddev, rdev->num_crtc);
 	if (r) {
 		return r;
@@ -217,25 +225,25 @@
 {
 	unsigned long irqflags;
 
-	spin_lock_irqsave(&rdev->irq.sw_lock, irqflags);
+	spin_lock_irqsave(&rdev->irq.lock, irqflags);
 	if (rdev->ddev->irq_enabled && (++rdev->irq.sw_refcount[ring] == 1)) {
 		rdev->irq.sw_int[ring] = true;
 		radeon_irq_set(rdev);
 	}
-	spin_unlock_irqrestore(&rdev->irq.sw_lock, irqflags);
+	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
 }
 
 void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring)
 {
 	unsigned long irqflags;
 
-	spin_lock_irqsave(&rdev->irq.sw_lock, irqflags);
+	spin_lock_irqsave(&rdev->irq.lock, irqflags);
 	BUG_ON(rdev->ddev->irq_enabled && rdev->irq.sw_refcount[ring] <= 0);
 	if (rdev->ddev->irq_enabled && (--rdev->irq.sw_refcount[ring] == 0)) {
 		rdev->irq.sw_int[ring] = false;
 		radeon_irq_set(rdev);
 	}
-	spin_unlock_irqrestore(&rdev->irq.sw_lock, irqflags);
+	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
 }
 
 void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc)
@@ -245,12 +253,12 @@
 	if (crtc < 0 || crtc >= rdev->num_crtc)
 		return;
 
-	spin_lock_irqsave(&rdev->irq.pflip_lock[crtc], irqflags);
+	spin_lock_irqsave(&rdev->irq.lock, irqflags);
 	if (rdev->ddev->irq_enabled && (++rdev->irq.pflip_refcount[crtc] == 1)) {
 		rdev->irq.pflip[crtc] = true;
 		radeon_irq_set(rdev);
 	}
-	spin_unlock_irqrestore(&rdev->irq.pflip_lock[crtc], irqflags);
+	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
 }
 
 void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc)
@@ -260,12 +268,77 @@
 	if (crtc < 0 || crtc >= rdev->num_crtc)
 		return;
 
-	spin_lock_irqsave(&rdev->irq.pflip_lock[crtc], irqflags);
+	spin_lock_irqsave(&rdev->irq.lock, irqflags);
 	BUG_ON(rdev->ddev->irq_enabled && rdev->irq.pflip_refcount[crtc] <= 0);
 	if (rdev->ddev->irq_enabled && (--rdev->irq.pflip_refcount[crtc] == 0)) {
 		rdev->irq.pflip[crtc] = false;
 		radeon_irq_set(rdev);
 	}
-	spin_unlock_irqrestore(&rdev->irq.pflip_lock[crtc], irqflags);
+	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
 }
 
+void radeon_irq_kms_enable_afmt(struct radeon_device *rdev, int block)
+{
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&rdev->irq.lock, irqflags);
+	rdev->irq.afmt[block] = true;
+	radeon_irq_set(rdev);
+	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
+}
+
+void radeon_irq_kms_disable_afmt(struct radeon_device *rdev, int block)
+{
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&rdev->irq.lock, irqflags);
+	rdev->irq.afmt[block] = false;
+	radeon_irq_set(rdev);
+	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
+}
+
+void radeon_irq_kms_enable_hpd(struct radeon_device *rdev, unsigned hpd_mask)
+{
+	unsigned long irqflags;
+	int i;
+
+	spin_lock_irqsave(&rdev->irq.lock, irqflags);
+	for (i = 0; i < RADEON_MAX_HPD_PINS; ++i)
+		rdev->irq.hpd[i] |= !!(hpd_mask & (1 << i));
+	radeon_irq_set(rdev);
+	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
+}
+
+void radeon_irq_kms_disable_hpd(struct radeon_device *rdev, unsigned hpd_mask)
+{
+	unsigned long irqflags;
+	int i;
+
+	spin_lock_irqsave(&rdev->irq.lock, irqflags);
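+	/* hpd[] entries are bool, so &= !(bit) clears exactly the pins set in hpd_mask */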
+	for (i = 0; i < RADEON_MAX_HPD_PINS; ++i)
+		rdev->irq.hpd[i] &= !(hpd_mask & (1 << i));
+	radeon_irq_set(rdev);
+	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
+}
+
+int radeon_irq_kms_wait_gui_idle(struct radeon_device *rdev)
+{
+	unsigned long irqflags;
+	int r;
+
+	spin_lock_irqsave(&rdev->irq.lock, irqflags);
+	rdev->irq.gui_idle = true;
+	radeon_irq_set(rdev);
+	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
+
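+	/* wait for the idle interrupt, testing the register directly to avoid races */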
+	r = wait_event_timeout(rdev->irq.idle_queue, radeon_gui_idle(rdev),
+			       msecs_to_jiffies(RADEON_WAIT_IDLE_TIMEOUT));
+
+	spin_lock_irqsave(&rdev->irq.lock, irqflags);
+	rdev->irq.gui_idle = false;
+	radeon_irq_set(rdev);
+	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
+	return r;
+}