[PATCH] md: fix deadlock due to md thread processing delayed requests.

Before completing a 'write', the md superblock might need to be updated.
This is best done by the md_thread.

The current code schedules the superblock update and queues the write request
for later handling by the md_thread.

However, some personalities (raid5/raid6) will deadlock if the md_thread
tries to submit requests to its own array.

So this patch changes things so that the process submitting the request waits
for the superblock to be written and then submits the request itself.

This fixes a recently-created deadlock in raid5/raid6.

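For illustration only, the following is a minimal userspace sketch of the
synchronisation pattern this patch introduces: the writer marks the
superblock dirty, wakes the flusher (standing in for the md_thread), waits
until the dirty flag is cleared, and only then submits its own request.
This is not kernel code; the pthread plumbing and the flusher/write_start
names are assumptions chosen to mirror mddev->sb_dirty, mddev->sb_wait and
md_write_start().

	/* Illustrative userspace analogue only, not part of the patch. */
	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t write_lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t  sb_wait    = PTHREAD_COND_INITIALIZER;
	static int sb_dirty;
	static int in_sync = 1;

	/* Plays the role of the md_thread: writes the superblock, clears
	 * sb_dirty, and wakes anyone waiting on sb_wait. */
	static void *flusher(void *arg)
	{
		pthread_mutex_lock(&write_lock);
		while (!sb_dirty)
			pthread_cond_wait(&sb_wait, &write_lock);
		printf("flusher: writing superblock\n");
		sb_dirty = 0;                     /* superblock now on disk */
		pthread_cond_broadcast(&sb_wait); /* wake_up(&mddev->sb_wait) */
		pthread_mutex_unlock(&write_lock);
		return NULL;
	}

	/* Analogue of the new md_write_start(): mark dirty, wake the
	 * flusher, then wait for sb_dirty to clear before proceeding. */
	static void write_start(void)
	{
		pthread_mutex_lock(&write_lock);
		if (in_sync) {
			in_sync = 0;
			sb_dirty = 1;
			pthread_cond_broadcast(&sb_wait); /* md_wakeup_thread() */
		}
		while (sb_dirty)          /* wait_event(sb_wait, sb_dirty==0) */
			pthread_cond_wait(&sb_wait, &write_lock);
		pthread_mutex_unlock(&write_lock);
		printf("writer: superblock clean, submitting request myself\n");
	}

	int main(void)
	{
		pthread_t t;
		pthread_create(&t, NULL, flusher, NULL);
		write_start();
		pthread_join(t, NULL);
		return 0;
	}

The key point mirrored here is that the waiting process, not the flusher
thread, ends up submitting the actual request, so the md_thread never has
to issue I/O to its own array.
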
Signed-off-by: Neil Brown <neilb@cse.unsw.edu.au>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 789b114..7075beb 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -224,8 +224,8 @@
 	INIT_LIST_HEAD(&new->all_mddevs);
 	init_timer(&new->safemode_timer);
 	atomic_set(&new->active, 1);
-	bio_list_init(&new->write_list);
 	spin_lock_init(&new->write_lock);
+	init_waitqueue_head(&new->sb_wait);
 
 	new->queue = blk_alloc_queue(GFP_KERNEL);
 	if (!new->queue) {
@@ -1307,6 +1307,7 @@
 	if (!mddev->persistent) {
 		mddev->sb_dirty = 0;
 		spin_unlock(&mddev->write_lock);
+		wake_up(&mddev->sb_wait);
 		return;
 	}
 	spin_unlock(&mddev->write_lock);
@@ -1348,6 +1349,7 @@
 	}
 	mddev->sb_dirty = 0;
 	spin_unlock(&mddev->write_lock);
+	wake_up(&mddev->sb_wait);
 
 }
 
@@ -3368,29 +3370,26 @@
 
 /* md_write_start(mddev, bi)
  * If we need to update some array metadata (e.g. 'active' flag
- * in superblock) before writing, queue bi for later writing
- * and return 0, else return 1 and it will be written now
+ * in superblock) before writing, schedule a superblock update
+ * and wait for it to complete.
  */
-int md_write_start(mddev_t *mddev, struct bio *bi)
+void md_write_start(mddev_t *mddev, struct bio *bi)
 {
+	DEFINE_WAIT(w);
 	if (bio_data_dir(bi) != WRITE)
-		return 1;
+		return;
 
 	atomic_inc(&mddev->writes_pending);
-	spin_lock(&mddev->write_lock);
-	if (mddev->in_sync == 0 && mddev->sb_dirty == 0) {
-		spin_unlock(&mddev->write_lock);
-		return 1;
-	}
-	bio_list_add(&mddev->write_list, bi);
-
 	if (mddev->in_sync) {
-		mddev->in_sync = 0;
-		mddev->sb_dirty = 1;
+		spin_lock(&mddev->write_lock);
+		if (mddev->in_sync) {
+			mddev->in_sync = 0;
+			mddev->sb_dirty = 1;
+			md_wakeup_thread(mddev->thread);
+		}
+		spin_unlock(&mddev->write_lock);
 	}
-	spin_unlock(&mddev->write_lock);
-	md_wakeup_thread(mddev->thread);
-	return 0;
+	wait_event(mddev->sb_wait, mddev->sb_dirty==0);
 }
 
 void md_write_end(mddev_t *mddev)
@@ -3685,7 +3684,6 @@
 		mddev->sb_dirty ||
 		test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
 		test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
-		mddev->write_list.head ||
 		(mddev->safemode == 1) ||
 		(mddev->safemode == 2 && ! atomic_read(&mddev->writes_pending)
 		 && !mddev->in_sync && mddev->recovery_cp == MaxSector)
@@ -3694,7 +3692,6 @@
 
 	if (mddev_trylock(mddev)==0) {
 		int spares =0;
-		struct bio *blist;
 
 		spin_lock(&mddev->write_lock);
 		if (mddev->safemode && !atomic_read(&mddev->writes_pending) &&
@@ -3704,21 +3701,11 @@
 		}
 		if (mddev->safemode == 1)
 			mddev->safemode = 0;
-		blist = bio_list_get(&mddev->write_list);
 		spin_unlock(&mddev->write_lock);
 
 		if (mddev->sb_dirty)
 			md_update_sb(mddev);
 
-		while (blist) {
-			struct bio *b = blist;
-			blist = blist->bi_next;
-			b->bi_next = NULL;
-			generic_make_request(b);
-			/* we already counted this, so need to un-count */
-			md_write_end(mddev);
-		}
-
 
 		if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
 		    !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 3f5234f..98b0977 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -561,8 +561,8 @@
 	 * thread has put up a bar for new requests.
 	 * Continue immediately if no resync is active currently.
 	 */
-	if (md_write_start(mddev, bio)==0)
-		return 0;
+	md_write_start(mddev, bio); /* wait on superblock update early */
+
 	spin_lock_irq(&conf->resync_lock);
 	wait_event_lock_irq(conf->wait_resume, !conf->barrier, conf->resync_lock, );
 	conf->nr_pending++;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 8476515..fd7324a 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -700,8 +700,7 @@
 		return 0;
 	}
 
-	if (md_write_start(mddev, bio) == 0)
-		return 0;
+	md_write_start(mddev, bio);
 
 	/*
 	 * Register the new request and wait if the reconstruction
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 1ce3f5a..93a9726 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -1411,8 +1411,7 @@
 	sector_t logical_sector, last_sector;
 	struct stripe_head *sh;
 
-	if (md_write_start(mddev, bi)==0)
-		return 0;
+	md_write_start(mddev, bi);
 
 	if (bio_data_dir(bi)==WRITE) {
 		disk_stat_inc(mddev->gendisk, writes);
diff --git a/drivers/md/raid6main.c b/drivers/md/raid6main.c
index d9c3854..f62ea1a 100644
--- a/drivers/md/raid6main.c
+++ b/drivers/md/raid6main.c
@@ -1570,8 +1570,7 @@
 	sector_t logical_sector, last_sector;
 	struct stripe_head *sh;
 
-	if (md_write_start(mddev, bi)==0)
-		return 0;
+	md_write_start(mddev, bi);
 
 	if (bio_data_dir(bi)==WRITE) {
 		disk_stat_inc(mddev->gendisk, writes);